// Cloned by Luke Scales on 30 Nov 2019 from World "Character recognition neural network" by "Coding Train" project
// Please leave this clone trail here.
// Port of Character recognition neural network from here:
// https://github.com/CodingTrain/Toy-Neural-Network-JS/tree/master/examples/mnist
// with many modifications
// TODO: classifier working, pre-train model using mnist database or figure out how to train live in browser
//
// --- defined by MNIST - do not change these ---------------------------------------
const PIXELS = 28; // images in data set are tiny
const PIXELSSQUARED = PIXELS * PIXELS; // 784 input values per image
// number of training and test exemplars in the data set:
const NOTRAIN = 60000;
const NOTEST = 10000;
//--- can modify all these --------------------------------------------------
// no of nodes in network
const noinput = PIXELSSQUARED;
const nohidden = 64;
const nooutput = 10; // one output node per digit 0-9
const learningrate = 0.1; // default 0.1
// should we train every timestep or not
// (the "Stop training" button in the run header sets this to false)
let do_training = true;
// how many to train and test per timestep
const TRAINPERSTEP = 30;
const TESTPERSTEP = 5;
// multiply it by this to magnify for display
const ZOOMFACTOR = 7;
const ZOOMPIXELS = ZOOMFACTOR * PIXELS; // side of each magnified image (196)
// canvas layout:
// 3 rows of
// large image + 50 gap + small image
// 50 gap between rows
const canvaswidth = ( PIXELS + ZOOMPIXELS ) + 50;
const canvasheight = ( ZOOMPIXELS * 3 ) + 100;
const DOODLE_THICK = 18; // thickness of doodle lines
const DOODLE_BLUR = 3; // blur factor applied to doodles
// MNIST data set, filled in asynchronously by loadData():
let mnist;
// all data is loaded into this
// mnist.train_images
// mnist.train_labels
// mnist.test_images
// mnist.test_labels
// TensorFlow.js layers model, loaded lazily in classify()
let classifier;
// Coding Train NeuralNetwork instance, constructed in setup()
let nn;
// training/testing progress counters (wrap around the data set):
let trainrun = 1;
let train_index = 0;
let testrun = 1;
let test_index = 0;
let total_tests = 0;
let total_correct = 0;
// images in LHS:
let doodle, demo;
let doodle_exists = false;
let demo_exists = false;
let mousedrag = false; // are we in the middle of a mouse drag drawing?
// save inputs to global var to inspect
// type these names in console
var train_inputs, test_inputs, demo_inputs, doodle_inputs;
// Matrix.randomize() is changed to point to this. Must be defined by user of Matrix.
// Weight initialiser that Matrix.randomize() is pointed at.
// Returns a uniform random float in [-0.5, 0.5].
// (The Coding Train default range is -1 to 1.)
function randomWeight()
{
    const halfRange = 0.5;
    return AB.randomFloatAtoB( -halfRange, halfRange );
}
// CSS trick
// make run header bigger
$("#runheaderbox").css ( { "max-height": "95vh" } );
//--- start of AB.msgs structure: ---------------------------------------------------------
// We output a series of AB.msgs to put data at various places in the run header.
// Each AB.msg slot number is reserved below; even-numbered slots are filled
// in later with changing data (guesses, scores, demo ids).
var thehtml;
// 1 Doodle header
thehtml = "<hr> <h1> 1. Doodle </h1> Top row: Doodle (left) and shrunk (right). <br> " +
" Draw your doodle in top LHS. <button onclick='wipeDoodle();' class='normbutton' >Clear doodle</button> <br> ";
AB.msg ( thehtml, 1 );
// 2 Doodle variable data (guess)
// 3 Training header
thehtml = "<hr> <h1> 2. Training </h1> Middle row: Training image magnified (left) and original (right). <br> " +
" <button onclick='do_training = false;' class='normbutton' >Stop training</button> <br> ";
AB.msg ( thehtml, 3 );
// 4 variable training data
// 5 Testing header
thehtml = "<h3> Hidden tests </h3> " ;
AB.msg ( thehtml, 5 );
// 6 variable testing data
// 7 Demo header
thehtml = "<hr> <h1> 3. Demo </h1> Bottom row: Test image magnified (left) and original (right). <br>" +
" The network is <i>not</i> trained on any of these images. <br> " +
" <button onclick='makeDemo();' class='normbutton' >Demo test image</button> <br> ";
AB.msg ( thehtml, 7 );
// 8 Demo variable data (random demo ID)
// 9 Demo variable data (changing guess)
// opening span tag used to highlight the network's current guess/score
const greenspan = "<span style='font-weight:bold; font-size:x-large; color:darkgreen'> " ;
//--- end of AB.msgs structure: ---------------------------------------------------------
// p5.js setup: build the canvas and doodle buffer, then dynamically load
// all third-party JS (TensorFlow.js, tfjs-vis, Chart.js) and the Coding
// Train NN code plus the MNIST data set.
function setup()
{
    createCanvas ( canvaswidth, canvasheight );
    doodle = createGraphics ( ZOOMPIXELS, ZOOMPIXELS ); // doodle on larger canvas
    doodle.pixelDensity(1);

    // show a loading screen while the JS libraries and the data set load
    AB.loadingScreen();

    // --- third-party libraries -------------------------------------------
    // BUG FIX: the original loaded TensorFlow.js twice - once via
    // $.getScript and again via a manually inserted <script> tag for the
    // same URL. One load is enough.
    const TFurl = 'https://unpkg.com/@tensorflow/tfjs';
    $.getScript( TFurl, function() {
        console.log('tensorflow.js loaded');
    });

    const tfVisUrl = "https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-vis@1.0.2/dist/tfjs-vis.umd.min.js";
    $.getScript( tfVisUrl, function() {
        console.log('tfvis loaded');
        run();
    });

    const chartjsurl = "https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.4.0/Chart.min.js";
    $.getScript ( chartjsurl, function() {
        console.log('chartjs loaded');
    });

    // --- Coding Train NN code plus MNIST loader --------------------------
    // Loaded strictly in order: matrix.js -> nn.js -> mnist.js, then the
    // network is built and the data set fetched.
    // BUG FIX: the original also did console.log(MnistData) here, but
    // MnistData is never defined (the class below is commented out), so
    // that line threw a ReferenceError.
    $.getScript ( "/uploads/codingtrain/matrix.js", function()
    {
        $.getScript ( "/uploads/codingtrain/nn.js", function()
        {
            $.getScript ( "/uploads/codingtrain/mnist.js", function()
            {
                console.log ("All JS loaded");
                nn = new NeuralNetwork( noinput, nohidden, nooutput );
                nn.setLearningRate ( learningrate );
                loadData();
            });
        });
    });
}
// load data set from local file (on this server)
// Fetch the MNIST data set from this server into the global "mnist"
// object, then dismiss the loading screen.
function loadData()
{
    loadMNIST ( (loaded) =>
    {
        mnist = loaded;
        console.log ("All data loaded into mnist object:")
        console.log(mnist);
        AB.removeLoading(); // no-op if no loading screen exists
    });
}
function getImage ( img ) // build a greyscale P5 image from a raw brightness array
{
    // blank PIXELS x PIXELS image, then fill its RGBA pixel buffer
    const theimage = createImage (PIXELS, PIXELS);
    theimage.loadPixels();
    let px = 0;
    for (let i = 0; i < PIXELSSQUARED; i++)
    {
        const bright = img[i];
        // grey pixel: R = G = B = brightness, fully opaque alpha
        theimage.pixels[px++] = bright;
        theimage.pixels[px++] = bright;
        theimage.pixels[px++] = bright;
        theimage.pixels[px++] = 255;
    }
    theimage.updatePixels();
    return theimage;
}
function getInputs ( img ) // convert img array into normalised input array
{
    // map each 0..255 brightness value onto the 0..1 range
    return Array.from ( { length: PIXELSSQUARED },
                        (_, i) => img[i] / 255 );
}
// Train the network on one exemplar, taken from global "train_index".
// If "show" is true the image is also drawn in the middle row.
function trainit (show)
{
    const img = mnist.train_images[train_index];
    const label = mnist.train_labels[train_index];

    if (show) // optional visual of the current training image
    {
        const theimage = getImage ( img );
        image ( theimage, 0, ZOOMPIXELS+50, ZOOMPIXELS, ZOOMPIXELS ); // magnified
        image ( theimage, ZOOMPIXELS+50, ZOOMPIXELS+50, PIXELS, PIXELS ); // original
    }

    const inputs = getInputs ( img );

    // one-hot target vector: 1 at the label position, 0 everywhere else
    const targets = new Array(10).fill(0);
    targets[label] = 1;

    train_inputs = inputs; // global - can inspect in console
    nn.train ( inputs, targets );

    thehtml = " trainrun: " + trainrun + "<br> no: " + train_index ;
    AB.msg ( thehtml, 4 );

    train_index++;
    if ( train_index == NOTRAIN ) // wrapped around the whole training set
    {
        train_index = 0;
        console.log( "finished trainrun: " + trainrun );
        trainrun++;
    }
}
// Test the network on one hidden exemplar, taken from global "test_index",
// and keep a running accuracy score in the run header.
function testit()
{
    const img = mnist.test_images[test_index];
    const label = mnist.test_labels[test_index];

    const inputs = getInputs ( img );
    test_inputs = inputs; // global - can inspect in console

    const guess = findMax( nn.predict(inputs) ); // index of the strongest output node

    total_tests++;
    if (guess == label) total_correct++;

    const percent = (total_correct / total_tests) * 100 ;

    thehtml = " testrun: " + testrun + "<br> no: " + total_tests + " <br> " +
        " correct: " + total_correct + "<br>" +
        " score: " + greenspan + percent.toFixed(2) + "</span>";
    AB.msg ( thehtml, 6 );

    test_index++;
    if ( test_index == NOTEST ) // wrapped around the test set - reset the score
    {
        console.log( "finished testrun: " + testrun + " score: " + percent.toFixed(2) );
        testrun++;
        test_index = 0;
        total_tests = 0;
        total_correct = 0;
    }
}
//--- find no.1 (and maybe no.2) output nodes ---------------------------------------
// (restriction) assumes array values start at 0 (which is true for output nodes)
// Returns [index of largest value, index of second-largest value].
// BUG FIX: the original never demoted the previous no.1 when a new maximum
// was found, so the no.2 guess was frequently wrong (e.g. for [5,3,9] it
// returned no.2 = index of 3 instead of index of 5).
function find12 (a)
{
    let no1 = 0;
    let no2 = 0;
    let no1value = 0;
    let no2value = 0;
    for (let i = 0; i < a.length; i++)
    {
        if (a[i] > no1value)
        {
            // new best - previous best becomes second best
            no2 = no1;
            no2value = no1value;
            no1 = i;
            no1value = a[i];
        }
        else if (a[i] > no2value)
        {
            no2 = i;
            no2value = a[i];
        }
    }
    return [ no1, no2 ];
}
// just get the maximum - separate function for speed - done many times
// Returns the index of the largest value in the array - our guessed digit.
// (restriction) assumes values are >= 0; ties go to the earliest index.
function findMax (a)
{
    let best = 0;
    let bestValue = 0;
    a.forEach( (v, i) => {
        if (v > bestValue) { best = i; bestValue = v; }
    });
    return best;
}
// --- the draw function -------------------------------------------------------------
// every step:
// Mouse-drag history of the current doodle strokes, appended to in draw()
// and read by boundingBox() to crop the drawn region.
// NOTE(review): clickD is declared but never written or read.
var clickX = new Array();
var clickY = new Array();
var clickD = new Array();
// p5.js frame loop: trains/tests the network each frame (while enabled),
// redraws the demo and doodle images, and records mouse-drag doodling.
function draw()
{
// check if libraries and data loaded yet:
if ( typeof mnist == 'undefined' ) return;
// how can we get white doodle on black background on yellow canvas?
// background('#ffffcc'); doodle.background('black');
background ('black');
if ( do_training )
{
// do some training per step
for (let i = 0; i < TRAINPERSTEP; i++)
{
if (i == 0) trainit(true); // show only one per step - still flashes by
else trainit(false);
}
// do some testing per step
for (let i = 0; i < TESTPERSTEP; i++)
testit();
}
// keep drawing demo and doodle images
// and keep guessing - we will update our guess as time goes on
if ( demo_exists )
{
drawDemo();
guessDemo();
}
if ( doodle_exists )
{
drawDoodle();
// guessDoodle();
}
// detect doodle drawing
// (restriction) the following assumes doodle starts at 0,0
if ( mouseIsPressed ) // gets called when we click buttons, as well as if in doodle corner
{
// console.log ( mouseX + " " + mouseY + " " + pmouseX + " " + pmouseY );
var MAX = ZOOMPIXELS + 20; // can draw up to this pixels in corner
// both current and previous mouse position must be inside the corner,
// so a drag that starts elsewhere does not scribble on the doodle
if ( (mouseX < MAX) && (mouseY < MAX) && (pmouseX < MAX) && (pmouseY < MAX) )
{
mousedrag = true; // start a mouse drag
doodle_exists = true;
doodle.stroke('white');
doodle.strokeWeight( DOODLE_THICK );
doodle.line(mouseX, mouseY, pmouseX, pmouseY);
// remember stroke positions for boundingBox()
clickX.push(mouseX);
clickY.push(mouseY);
}
}
else
{
// are we exiting a drawing
if ( mousedrag )
{
mousedrag = false;
// blur once at the end of each stroke, not on every frame
doodle.filter (BLUR, DOODLE_BLUR);
// console.log (doodle);
}
}
}
// Crop the canvas to the bounding box of the recorded doodle strokes
// (plus 20px padding on each side) and copy that region to a temp canvas.
// NOTE(review): `canvas` is not declared anywhere in this file - presumably
// the p5.js default canvas element; confirm before relying on this.
// FIXME: "chart_box" is a <canvas> element, which has no `src` attribute,
// so the assignment below has no visible effect; an <img> element (or
// drawImage onto it) is probably what was intended.
// Currently only referenced from commented-out code in classify().
function boundingBox() {
var minX = Math.min.apply(Math, clickX) - 20;
var maxX = Math.max.apply(Math, clickX) + 20;
var minY = Math.min.apply(Math, clickY) - 20;
var maxY = Math.max.apply(Math, clickY) + 20;
var tempCanvas = document.createElement("canvas"),
tCtx = tempCanvas.getContext("2d");
tempCanvas.width = maxX - minX;
tempCanvas.height = maxY - minY;
tCtx.drawImage(canvas, minX, minY, maxX - minX, maxY - minY, 0, 0, maxX - minX, maxY - minY);
var imgBox = document.getElementById("chart_box");
imgBox.src = tempCanvas.toDataURL();
return tempCanvas;
}
//--- demo -------------------------------------------------------------
// Pick a random exemplar from the *test* set (so the network has never
// trained on it), stash it in global "demo" and report its true label.
function makeDemo()
{
    demo_exists = true;
    const pick = AB.randomIntAtoB ( 0, NOTEST - 1 );
    demo = mnist.test_images[pick];
    const label = mnist.test_labels[pick];
    thehtml = "Test image no: " + pick + "<br>" +
        "Classification: " + label + "<br>" ;
    AB.msg ( thehtml, 8 );
    // type "demo" in console to see raw data
}
// Redraw the current demo image each frame in the bottom row:
// magnified on the left, original size on the right.
function drawDemo()
{
    const demoImg = getImage ( demo );
    image ( demoImg, 0, canvasheight - ZOOMPIXELS, ZOOMPIXELS, ZOOMPIXELS );
    image ( demoImg, ZOOMPIXELS+50, canvasheight - ZOOMPIXELS, PIXELS, PIXELS );
}
// Classify the current demo image with the network and show the guess.
function guessDemo()
{
    const inputs = getInputs ( demo );
    demo_inputs = inputs; // global - can inspect in console
    const outputs = nn.predict(inputs);
    const guess = findMax(outputs); // strongest output node
    thehtml = " We classify it as: " + greenspan + guess + "</span>" ;
    AB.msg ( thehtml, 9 );
}
//--- doodle -------------------------------------------------------------
// Draw the doodle buffer in the top row: full size (left), shrunk (right).
function drawDoodle()
{
    // doodle is a createGraphics buffer, so snapshot it with get()
    const snapshot = doodle.get();
    image ( snapshot, 0, 0, ZOOMPIXELS, ZOOMPIXELS );     // full size
    image ( snapshot, ZOOMPIXELS+50, 0, PIXELS, PIXELS ); // shrunk
}
// Convert a canvas/image into the 0..1-scaled input tensor expected by
// the selected model:
//   "a" (MLP) -> flat [1, 784] tensor
//   "b" (CNN) -> [1, 28, 28, 1] tensor
// NOTE(review): both alert branches fall through and return undefined -
// callers must handle a missing tensor.
function preprocessCanvas(image, modelName) {
// no model name given - warn (and implicitly return undefined)
if (modelName === undefined) {
alert("No model defined..")
}
// if model is digitrecognizermlp, perform all the preprocessing
else if (modelName === "a") {
// resize the input image to digitrecognizermlp's target size of (784, )
let tensor = tf.browser.fromPixels(image)
.resizeNearestNeighbor([28, 28])
.mean(2)
.toFloat()
.reshape([1 , 784]);
return tensor.div(255.0);
}
// if model is digitrecognizercnn, perform all the preprocessing
else if (modelName === "b") {
// resize the input image to digitrecognizercnn's target size of (1, 28, 28, 1)
let tensor = tf.browser.fromPixels(image)
.resizeNearestNeighbor([28, 28])
.mean(2)
.expandDims(2)
.expandDims()
.toFloat();
console.log(tensor.shape);
return tensor.div(255.0);
}
// unrecognised model name - warn (and implicitly return undefined)
else {
alert("Unknown model name..")
}
}
//------------------------------
// Chart to display predictions
//------------------------------
// Chart.js instance; destroyed and recreated on each redraw after the first
var chart = "";
// 0 until the first chart has been drawn, then 1
var firstTime = 0;
// Render a bar chart of the prediction vector into the #chart_box canvas.
// label: x-axis labels, data: per-class confidences, modelSelected: model id
// shown in the dataset legend. Stores the Chart.js instance in global "chart".
function loadChart(label, data, modelSelected) {
    const context = document.getElementById('chart_box').getContext('2d');
    const config = {
        type: 'bar',
        data: {
            labels: label,
            datasets: [{
                label: modelSelected + " prediction",
                backgroundColor: '#f50057',
                borderColor: 'rgb(255, 99, 132)',
                data: data,
            }]
        },
        // chart display options - none customised yet
        options: {}
    };
    chart = new Chart(context, config);
}
//----------------------------
// display chart with updated
// drawing from canvas
//----------------------------
// Draw (or redraw) the bar chart of prediction confidences for digits 0-9.
// data: array of 10 confidence values from the model.
function displayChart(data) {
    // Model selection UI is not wired up yet, so the model id is fixed.
    // var select_option = select_model.options[select_model.selectedIndex].value;
    const select_option = 'a';
    // BUG FIX: "label" was assigned without declaration (implicit global,
    // a ReferenceError in strict/module code) - now properly scoped.
    const label = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"];
    if (firstTime == 0) {
        firstTime = 1;
    } else {
        // Chart.js requires destroying the old chart before reusing the canvas
        chart.destroy();
    }
    loadChart(label, data, select_option);
    document.getElementById('chart_box').style.display = "block";
}
// Classify the current doodle with the pre-trained TensorFlow.js model
// and display the per-digit confidences in the Chart.js bar chart.
async function classify() {
    // Load the converted Keras model (architecture + weights JSON).
    classifier = await tf.loadLayersModel('uploads/lukescales/processed_model_v2.2_update.json');
    console.log(classifier);

    // Shrink the doodle to MNIST size and grab its RGBA pixel data.
    const img = doodle.get();
    img.resize ( PIXELS, PIXELS );
    img.loadPixels();

    // Wrap the raw pixels in an ImageData so tf.browser.fromPixels accepts them.
    const uint8arr = Uint8ClampedArray.from(img.pixels);
    const image = new ImageData(uint8arr, 28, 28);

    // Collapse RGB to greyscale (mean over the channel axis), then reshape
    // to the model's expected [1, 28, 28, 1] batch.
    let tensor = tf.browser.fromPixels(image)
        .resizeNearestNeighbor([28, 28])
        .mean(2)
        .expandDims(2)
        .expandDims()
        .toFloat();
    console.log(tensor.shape);

    // BUG FIX: tf.Tensor is immutable - div() returns a NEW tensor. The
    // original called tensor.div(255.0) and discarded the result, feeding
    // the model unnormalised 0-255 values; the result must be reassigned.
    tensor = tensor.div(255.0);

    // Run the model and pull the 10-way confidence vector out of the tensor.
    const prediction = await classifier.predict(tensor).data();
    const results = Array.from(prediction);

    // display the predictions in the chart
    displayChart(results);
    console.log(results);
}
// Shrink the doodle to MNIST size, build a normalised input vector from
// the red channel, and show the network's top two guesses in the header.
function guessDoodle()
{
    // doodle is a createGraphics buffer, so snapshot it with get()
    const img = doodle.get();
    img.resize ( PIXELS, PIXELS );
    img.loadPixels();

    const inputs = [];
    for (let i = 0; i < PIXELSSQUARED ; i++)
        inputs.push( img.pixels[i * 4] / 255 ); // red channel only (image is greyscale)

    doodle_inputs = inputs; // global - can inspect in console

    // feed forward and take the no.1 and no.2 output nodes
    const guesses = find12( nn.predict(inputs) );
    thehtml = " We classify it as: " + greenspan + guesses[0] + "</span> <br>" +
        " No.2 guess is: " + greenspan + guesses[1] + "</span>";
    AB.msg ( thehtml, 2 );
}
// ml5-style classification callback: log any error, otherwise display the
// top label and its confidence in the run header.
function gotResult(error, results) {
    if (error) {
        console.error(error);
        // BUG FIX: the original fell through after logging and dereferenced
        // results[0], which is undefined on the error path.
        return;
    }
    // The results are in an array ordered by confidence.
    console.log(results);
    // Show the first label and confidence (rounded to 0.01 by nf)
    thehtml = 'Label: ' + results[0].label +
        'Confidence: ' + nf(results[0].confidence, 0, 2);
    AB.msg ( thehtml, 2 );
}
// "Clear doodle" button handler: classify the finished doodle first,
// then blank it out after a 3 second delay so the result can be seen.
function wipeDoodle()
{
    classify();
    setTimeout( () => {
        doodle_exists = false;
        doodle.background('black');
    }, 3000);
}
// --- debugging --------------------------------------------------
// in console
// showInputs(demo_inputs);
// showInputs(doodle_inputs);
// Console debug helper: print the input vector as a PIXELS-wide grid of
// two-decimal values, one console line per row of pixels.
function showInputs ( inputs )
{
    let str = "";
    inputs.forEach( (value, i) => {
        if ( i % PIXELS == 0 ) str += "\n"; // new line for each row of pixels
        str += " " + value.toFixed(2);
    });
    console.log (str);
}
// Data.js content
// Constants for the tfjs-tutorial MNIST sprite loader below (independent
// of the PIXELS/NOTRAIN constants used by the Coding Train code above).
const IMAGE_SIZE = 784;
const NUM_CLASSES = 10;
const NUM_DATASET_ELEMENTS = 65000;
// 5/6 of the sprite is training data, the rest test data
const TRAIN_TEST_RATIO = 5 / 6;
const NUM_TRAIN_ELEMENTS = Math.floor(TRAIN_TEST_RATIO * NUM_DATASET_ELEMENTS);
const NUM_TEST_ELEMENTS = NUM_DATASET_ELEMENTS - NUM_TRAIN_ELEMENTS;
const MNIST_IMAGES_SPRITE_PATH =
'https://storage.googleapis.com/learnjs-data/model-builder/mnist_images.png';
const MNIST_LABELS_PATH =
'https://storage.googleapis.com/learnjs-data/model-builder/mnist_labels_uint8';
/**
* A class that fetches the sprited MNIST dataset and returns shuffled batches.
*
* NOTE: This will get much easier. For now, we do data fetching and
* manipulation manually.
*/
// class MnistData {
// constructor() {
// this.shuffledTrainIndex = 0;
// this.shuffledTestIndex = 0;
// }
// Fetch the sprited MNIST image and the labels file, decode the sprite
// into a Float32Array of normalised pixels, and slice everything into
// train/test arrays on `this`. Adapted from the tfjs MnistData tutorial
// class (commented out above).
// NOTE(review): still written against `this` - as a bare module-level call
// `this` is undefined and the assignments below throw. Call it with a
// state object (load.call(state)) or restore the MnistData class. TODO.
async function load() {
    this.shuffledTrainIndex = 0;
    this.shuffledTestIndex = 0;

    // Make a request for the MNIST sprited image.
    const img = new Image();
    const canvas = document.createElement('canvas');
    const ctx = canvas.getContext('2d');
    const imgRequest = new Promise((resolve, reject) => {
        img.crossOrigin = '';
        img.onload = () => {
            img.width = img.naturalWidth;
            img.height = img.naturalHeight;
            // one big buffer for all 65000 images (4 bytes per float pixel)
            const datasetBytesBuffer =
                new ArrayBuffer(NUM_DATASET_ELEMENTS * IMAGE_SIZE * 4);
            // decode the sprite in chunks to keep the working canvas small
            const chunkSize = 5000;
            canvas.width = img.width;
            canvas.height = chunkSize;
            for (let i = 0; i < NUM_DATASET_ELEMENTS / chunkSize; i++) {
                const datasetBytesView = new Float32Array(
                    datasetBytesBuffer, i * IMAGE_SIZE * chunkSize * 4,
                    IMAGE_SIZE * chunkSize);
                ctx.drawImage(
                    img, 0, i * chunkSize, img.width, chunkSize, 0, 0, img.width,
                    chunkSize);
                const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
                for (let j = 0; j < imageData.data.length / 4; j++) {
                    // All channels hold an equal value since the image is grayscale, so
                    // just read the red channel.
                    datasetBytesView[j] = imageData.data[j * 4] / 255;
                }
            }
            this.datasetImages = new Float32Array(datasetBytesBuffer);
            resolve();
        };
        img.src = MNIST_IMAGES_SPRITE_PATH;
    });

    const labelsRequest = fetch(MNIST_LABELS_PATH);
    // BUG FIX: the original destructured Promise.all(...) without awaiting
    // it, so labelsResponse was undefined; both requests must be awaited.
    const [imgResponse, labelsResponse] =
        await Promise.all([imgRequest, labelsRequest]);

    // BUG FIX: Response.arrayBuffer() is async and must also be awaited.
    this.datasetLabels = new Uint8Array(await labelsResponse.arrayBuffer());

    // Create shuffled indices into the train/test set for when we select a
    // random dataset element for training / validation.
    this.trainIndices = tf.util.createShuffledIndices(NUM_TRAIN_ELEMENTS);
    this.testIndices = tf.util.createShuffledIndices(NUM_TEST_ELEMENTS);

    // Slice the images and labels into train and test sets.
    this.trainImages =
        this.datasetImages.slice(0, IMAGE_SIZE * NUM_TRAIN_ELEMENTS);
    this.testImages = this.datasetImages.slice(IMAGE_SIZE * NUM_TRAIN_ELEMENTS);
    this.trainLabels =
        this.datasetLabels.slice(0, NUM_CLASSES * NUM_TRAIN_ELEMENTS);
    this.testLabels =
        this.datasetLabels.slice(NUM_CLASSES * NUM_TRAIN_ELEMENTS);
}
// Draw a random batch of training exemplars as tensors.
// NOTE(review): extracted from the tfjs MnistData class - relies on `this`
// (nextBatch, trainImages, trainIndices), so it only works when invoked
// with the loader state as its context; as a bare call `this` is undefined.
function nextTrainBatch(batchSize) {
return this.nextBatch(
batchSize, [this.trainImages, this.trainLabels], () => {
// advance cyclically through the shuffled index list
this.shuffledTrainIndex =
(this.shuffledTrainIndex + 1) % this.trainIndices.length;
return this.trainIndices[this.shuffledTrainIndex];
});
}
// Draw a random batch of test exemplars as tensors.
// NOTE(review): same `this`-dependence as nextTrainBatch - only works when
// invoked with the loader state as its context.
function nextTestBatch(batchSize) {
return this.nextBatch(batchSize, [this.testImages, this.testLabels], () => {
// advance cyclically through the shuffled index list
this.shuffledTestIndex =
(this.shuffledTestIndex + 1) % this.testIndices.length;
return this.testIndices[this.shuffledTestIndex];
});
}
// Assemble one batch of exemplars as tensors.
// data is [flatImages, flatLabels]; index() yields the next exemplar index.
// Returns {xs: [batchSize, 784] floats, labels: [batchSize, 10] one-hot}.
function nextBatch(batchSize, data, index) {
    const batchImagesArray = new Float32Array(batchSize * IMAGE_SIZE);
    const batchLabelsArray = new Uint8Array(batchSize * NUM_CLASSES);

    for (let i = 0; i < batchSize; i++) {
        const idx = index();
        // copy one image (784 floats) and its one-hot label (10 bytes)
        batchImagesArray.set(
            data[0].slice(idx * IMAGE_SIZE, (idx + 1) * IMAGE_SIZE),
            i * IMAGE_SIZE);
        batchLabelsArray.set(
            data[1].slice(idx * NUM_CLASSES, (idx + 1) * NUM_CLASSES),
            i * NUM_CLASSES);
    }

    const xs = tf.tensor2d(batchImagesArray, [batchSize, IMAGE_SIZE]);
    const labels = tf.tensor2d(batchLabelsArray, [batchSize, NUM_CLASSES]);
    return {xs, labels};
}
// TFJS tutorial stuff
// Render a sample of 20 test-set digits into a tfjs-vis visor surface.
// NOTE(review): expects `data` to implement nextTestBatch() (the tfjs
// MnistData API) - confirm the caller supplies such an object.
function showExamples(data) {
// Create a container in the visor
const surface =
tfvis.visor().surface({ name: 'Input Data Examples', tab: 'Input Data'});
// Get the examples
const examples = data.nextTestBatch(20);
const numExamples = examples.xs.shape[0];
// Create a canvas element to render each example
for (let i = 0; i < numExamples; i++) {
const imageTensor = tf.tidy(() => {
// Reshape the image to 28x28 px
return examples.xs
.slice([i, 0], [1, examples.xs.shape[1]])
.reshape([28, 28, 1]);
});
const canvas = document.createElement('canvas');
canvas.width = 28;
canvas.height = 28;
canvas.style = 'margin: 4px;';
tf.browser.toPixels(imageTensor, canvas);
surface.drawArea.appendChild(canvas);
// free the tensor once it has been rendered
imageTensor.dispose();
}
}
// Entry point for the tfjs-vis demo, invoked when tfjs-vis finishes
// loading in setup() and again on DOMContentLoaded (so it may run twice).
// FIXME: load() does not return a data object (the MnistData class above
// is commented out), so data.load() below throws. This path is known to be
// broken - see the TODO at the top of the file.
function run() {
const data = load();
data.load();
showExamples(data);
}
document.addEventListener('DOMContentLoaded', run);