// Cloned by Pavan Kirageri on 4 Nov 2021 from World "Perceptron" by "Coding Train" project 
// Please leave this clone trail here.



// Ported from
// https://github.com/nature-of-code/noc-examples-p5.js/tree/master/chp10_nn/NOC_10_01_Perceptron


// A list of points we will use to "train" the perceptron
let training = new Array(100);

const LearningConstant = 1; // initial learning rate; training is easier to watch if this is low

// Coordinate space
let xmin = -1;
let ymin = -1;
let xmax = 1;
let ymax = 1;


// set fixed width run header 
AB.headerWidth(400);



// function to draw a line
// y = ax + b
// original: y = 0.3 * x + 0.4

const a = AB.randomFloatAtoB(0.1, 0.9);
const b = AB.randomFloatAtoB(0.1, 0.9);

function f(x) {
    return (a * x + b);
}

// Classification: is the point above or below the line?
// The perceptron's decision boundary should converge towards this line.

function getClassification(x, y) {
    if (y < f(x)) return (-1);
    else return (1);
}
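
// Worked example (illustrative values; a and b are random each run):
// with a = 0.3 and b = 0.4, f(0.5) = 0.55, so the point (0.5, 0.2) lies
// below the line and getClassification(0.5, 0.2) returns -1, while
// (0.5, 0.9) lies above it and returns 1.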




// Daniel Shiffman
// The Nature of Code
// http://natureofcode.com

// Simple Perceptron Example
// See: http://en.wikipedia.org/wiki/Perceptron

// Perceptron Class

// Perceptron is created with n weights and learning constant
class Perceptron {
    constructor(n, c) {
        // Array of weights for inputs
        this.weights = new Array(n);
        // Start with random weights
        for (let i = 0; i < this.weights.length; i++) {
            this.weights[i] = random(-1, 1);
        }
        this.c = c; // learning rate/constant
    }

    // Function to train the Perceptron
    // Weights are adjusted based on "desired" answer
    train(inputs, desired, count) {
        // Guess the result
        let guess = this.feedforward(inputs);
        // Error = desired output - guessed output
        // Since both values are -1 or 1, the error can only be 0, -2, or 2
        let error = desired - guess;
        // Adjust each weight by (effective learning rate) * error * input.
        // The effective rate starts at this.c and shrinks as count grows,
        // so later points in each pass nudge the weights less.
        for (let i = 0; i < this.weights.length; i++) {
            this.weights[i] += (this.c - (count / 100)) * error * inputs[i];
        }
    }
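
    // Single-update example (illustrative numbers): if desired = 1 but the
    // guess was -1, the error is 2 and each weight moves in the direction of
    // its input, pushing the next guess for this point towards +1; a correct
    // guess gives error = 0 and leaves the weights unchanged.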

    // Guess -1 or 1 based on input values
    feedforward(inputs) {
        // Sum all values
        let sum = 0;
        for (let i = 0; i < this.weights.length; i++) {
            sum += inputs[i] * this.weights[i];
        }
        // Result is sign of the sum, -1 or 1
        return this.activate(sum);
    }

    activate(sum) {
        if (sum > 0) return 1;
        else return -1;
    }

    // Return weights
    getWeights() {
        return this.weights;
    }
    // Total error over the whole training set:
    // each point contributes 0 if classified correctly, 2 if not
    getError() {
        let totalError = 0;
        training.forEach((node) => {
            const predicted = this.feedforward(node.input);
            const expected = node.output;
            totalError += Math.abs(predicted - expected);
        });
        return totalError;
    }
}
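
// Minimal usage sketch of the class above (illustrative only; this helper is
// not called anywhere in this World). It trains a fresh Perceptron on
// [x, y, bias] points shaped like the "training" entries built in setup(),
// stopping once a full pass makes no mistakes.
function trainUntilConverged(points, maxPasses) {
    const p = new Perceptron(3, LearningConstant);
    for (let pass = 0; pass < maxPasses; pass++) {
        let mistakes = 0;
        for (let i = 0; i < points.length; i++) {
            // Count how the current weights would classify this point
            if (p.feedforward(points[i].input) !== points[i].output) mistakes++;
            // Then nudge the weights towards the desired answer
            p.train(points[i].input, points[i].output, i);
        }
        // Linearly separable data should eventually reach a clean pass
        if (mistakes === 0) break;
    }
    return p;
}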



// The Nature of Code
// Daniel Shiffman
// http://natureofcode.com

// Simple Perceptron Example
// See: http://en.wikipedia.org/wiki/Perceptron

// Code based on text "Artificial Intelligence", George Luger

// A Perceptron object
let ptron;

// We will train the perceptron with one training point at a time
let count = 0;



function setup() {
    createCanvas(800, 800);

    // The perceptron has 3 inputs 
    // x, y, and bias
    ptron = new Perceptron(3, LearningConstant);

    // Create a random set of training points and calculate the "known" answer
    for (let i = 0; i < training.length; i++) {
        let x = random(xmin, xmax);
        let y = random(ymin, ymax);

        let answer = getClassification(x, y);

        training[i] = {
            input: [x, y, 1],
            output: answer
        };
    }
}
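
// Each training entry pairs an [x, y, bias] input with its known class, e.g.
// { input: [0.42, -0.17, 1], output: -1 } for a point below the line
// (illustrative values; the actual points and the line are random each run).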


let step = 1;

function draw() {
    AB.msg("Line: y = " + a.toFixed(2) + " x + " + b.toFixed(2) +
        "<br> Step: " + step);
    step++;
    AB.msg("Learning Constant = " + (LearningConstant /step) + "<br> ")

    background('black');

    // Draw the line
    strokeWeight(3);
    stroke('lightblue');
    let x1 = map(xmin, xmin, xmax, 0, width);
    let y1 = map(f(xmin), ymin, ymax, height, 0);
    let x2 = map(xmax, xmin, xmax, 0, width);
    let y2 = map(f(xmax), ymin, ymax, height, 0);
    line(x1, y1, x2, y2);

    // Draw the line based on the current weights
    // Formula is weights[0]*x + weights[1]*y + weights[2] = 0
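    // Solving that formula for y gives the two endpoints drawn below:
    //   y = (-weights[2] - weights[0] * x) / weights[1]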
    stroke('white');
    let weights = ptron.getWeights();
    x1 = xmin;
    y1 = (-weights[2] - weights[0] * x1) / weights[1];
    x2 = xmax;
    y2 = (-weights[2] - weights[0] * x2) / weights[1];

    x1 = map(x1, xmin, xmax, 0, width);
    y1 = map(y1, ymin, ymax, height, 0);
    x2 = map(x2, xmin, xmax, 0, width);
    y2 = map(y2, ymin, ymax, height, 0);
    line(x1, y1, x2, y2);
    // Stop the sketch once a full pass over the training set has zero error
    if (step > training.length && step % training.length === 0 && ptron.getError() === 0) {
        noLoop();
    }

    // Train the Perceptron with one "training" point at a time
    AB.msg("<br> Training on single point: " + count, 2);
    ptron.train(training[count].input, training[count].output, count);
    count = (count + 1) % training.length;

    // Draw all the points
    AB.msg("<br> Drawing points 0 to " + (count - 1), 3);

    for (let i = 0; i < count; i++) {
        strokeWeight(1);
        let guess = ptron.feedforward(training[i].input);

        let x = map(training[i].input[0], xmin, xmax, 0, width);
        let y = map(training[i].input[1], ymin, ymax, height, 0);

        // Original version coloured by what the Perceptron would "guess",
        // which shows how its guesses change over time:
        //   if (guess > 0)

        // This version colours by the correct answer:

        if (getClassification(training[i].input[0], training[i].input[1]) == 1) {
            stroke('lightgreen');
            fill('lightgreen');
        } else {
            stroke('lightpink');
            fill('lightpink');
        }

        ellipse(x, y, 12, 12);
    }
}