// Cloned by Aoife Doherty on 17 Nov 2020 from World "Perceptron" by "Coding Train" project
// Please leave this clone trail here.
// Port from
// https://github.com/nature-of-code/noc-examples-p5.js/tree/master/chp10_nn/NOC_10_01_Perceptron
// A list of points we will use to "train" the perceptron
let training = new Array(400); //Question 1: Change the number of points
//The learning rate sets the step size of each learning update. If it is too low, training takes ages;
//if it is too high, the weights overshoot and the guessed line jumps from one side of the target line to the other.
//A common idea in machine learning is to start with a high learning rate and decrease it over time.
const LearningConstant = 0.01; // easier to watch if it is low //Question 2: Change the learning constant (I then changed it back)
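// Illustrative sketch only (not used by this World): one simple way to decrease the
// learning rate over time would be an exponential decay schedule, for example:
function decayedLearningRate ( initialRate, step )
{
	return initialRate * Math.pow ( 0.99, step );		// shrink the rate a little on every training step
}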
// Coordinate space
let xmin = -1;
let ymin = -1;
let xmax = 1;
let ymax = 1;
// set fixed width run header
AB.headerWidth ( 400 );
// f(x) defines the line y = ax + b used to classify the training points
// (the original example used y = 0.3 * x + 0.4)
// Here a and b are chosen at random, so each run starts with a different line
const a = AB.randomFloatAtoB ( 0.1, 0.9 );
const b = AB.randomFloatAtoB ( 0.1, 0.9 );
function f(x)
{
return ( a * x + b );
}
// Classification: is the point above or below the line?
// If y is below the line's value f(x), the class is -1; otherwise it is 1.
// The perceptron's guessed line should move towards this boundary.
function getClassification ( x, y )
{
if (y < f(x)) return ( -1 );
else return ( 1 );
}
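// Worked example: if a = 0.3 and b = 0.4 (the original values), then f(0.5) = 0.55,
// so getClassification(0.5, 0.2) returns -1 (below the line) and getClassification(0.5, 0.9) returns 1 (above it).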
// Daniel Shiffman
// The Nature of Code
// http://natureofcode.com
// Simple Perceptron Example
// See: http://en.wikipedia.org/wiki/Perceptron
// Perceptron Class
// Perceptron is created with n weights and learning constant
class Perceptron
{
//The constructor runs automatically when a new Perceptron object is created.
//n is the number of inputs (and hence weights), c is the learning rate.
constructor(n, c)
{
// Array of weights for inputs
this.weights = new Array(n);
// Start with random weights
for (let i = 0; i < this.weights.length; i++)
{
	this.weights[i] = random(-1, 1);
}
this.c = c; // learning rate/constant
}
// Function to train the Perceptron
// Weights are adjusted based on "desired" answer
train(inputs, desired)
{
// Guess the result
let guess = this.feedforward(inputs);
// Error = desired output - guessed output
// Note this can only be 0, -2, or 2
this.error = desired - guess;
// Adjust each weight by learning constant * error * input
for (let i = 0; i < this.weights.length; i++)
{
this.weights[i] += this.c * this.error * inputs[i];
}
}
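// Worked example: if desired = 1 but guess = -1, the error is 2;
// with c = 0.01 and an input of 0.5, that weight moves by 0.01 * 2 * 0.5 = 0.01.
// A correct guess gives error = 0, so the weights are left unchanged.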
// Guess -1 or 1 based on input values
feedforward(inputs)
{
// Sum all values
let sum = 0;
for (let i = 0; i < this.weights.length; i++)
{
sum += inputs[i] * this.weights[i];
}
// Result is sign of the sum, -1 or 1
return this.activate(sum);
}
activate(sum)
{
if (sum > 0) return 1;
else return -1;
}
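// e.g. activate(0.37) returns 1 and activate(-0.02) returns -1 (a simple sign/step activation)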
// Return weights
getWeights()
{
return this.weights;
}
}
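// Illustrative sketch only (not called by this World): a helper like this could
// report what fraction of the training points the perceptron currently gets right.
function trainingAccuracy ( p, points )
{
	let correct = 0;
	for (let i = 0; i < points.length; i++)
	{
		if ( p.feedforward ( points[i].input ) === points[i].output ) correct++;
	}
	return correct / points.length;		// fraction between 0 and 1
}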
// The Nature of Code
// Daniel Shiffman
// http://natureofcode.com
// Simple Perceptron Example
// See: http://en.wikipedia.org/wiki/Perceptron
// Code based on text "Artificial Intelligence", George Luger
// A Perceptron object
let ptron;
// We will train the perceptron with one "Point" object at a time
let count = 0;
function setup()
{
createCanvas(800, 800);
// The perceptron has 3 inputs
// x, y, and bias
ptron = new Perceptron ( 3, LearningConstant );
// Create a random set of training points and calculate the "known" answer
for (let i = 0; i < training.length; i++)
{
let x = random(xmin, xmax);
let y = random(ymin, ymax);
let answer = getClassification ( x, y );
training[i] =
{
input: [x, y, 1],
output: answer
};
}
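// Example of one training record (illustrative values): { input: [0.25, -0.6, 1], output: -1 }
// The trailing 1 is the bias input, so the third weight acts as the bias weight.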
}
var step = 1;
function draw()
{
AB.msg ( "Line: y = " + a.toFixed(2) + " x + " + b.toFixed(2) +
"<br> Step: " + step );
step++;
background('black');
// Draw the line
strokeWeight(4);
stroke('red');
let x1 = map(xmin, xmin, xmax, 0, width);
let y1 = map(f(xmin), ymin, ymax, height, 0);
let x2 = map(xmax, xmin, xmax, 0, width);
let y2 = map(f(xmax), ymin, ymax, height, 0);
line(x1, y1, x2, y2);
// Draw the line based on the current weights
// Formula is weights[0]*x + weights[1]*y + weights[2] = 0
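// Setting that weighted sum to zero and solving for y gives the current decision boundary:
//    y = ( -weights[2] - weights[0] * x ) / weights[1]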
stroke('white');
let weights = ptron.getWeights();
x1 = xmin;
y1 = (-weights[2] - weights[0] * x1) / weights[1];
x2 = xmax;
y2 = (-weights[2] - weights[0] * x2) / weights[1];
x1 = map(x1, xmin, xmax, 0, width);
y1 = map(y1, ymin, ymax, height, 0);
x2 = map(x2, xmin, xmax, 0, width);
y2 = map(y2, ymin, ymax, height, 0);
line(x1, y1, x2, y2);
// Train the Perceptron with one "training" point at a time
AB.msg ( "<br> Training on single point: " + count, 2 );
ptron.train(training[count].input, training[count].output);
count = (count + 1) % training.length;
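// e.g. with 400 training points, count runs 0..399 and then wraps back to 0,
// so training keeps cycling over the same set of points.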
// Draw all the points
AB.msg ( "<br> Drawing points 0 to " + (count-1), 3 );
for (let i = 0; i < count; i++)
{
strokeWeight(1);
let guess = ptron.feedforward(training[i].input);
let x = map(training[i].input[0], xmin, xmax, 0, width);
let y = map(training[i].input[1], ymin, ymax, height, 0);
// original version: based on what the Perceptron would "guess" - shows how its guess changes over time
// if (guess > 0)
// this version: correct answer
if ( getClassification ( training[i].input[0], training[i].input[1] ) == 1 )
{
stroke('lightgreen');
fill('lightgreen');
}
else
{
stroke('lightpink');
fill('lightpink');
}
ellipse(x, y, 12, 12);
}
}