Code viewer for World: Hugging Face Model Explorer

// Cloned by Rajat Lashkare on 2 Dec 2023 from World "AI API Integration with Hugging Face" by Rajat Lashkare 
// Please leave this clone trail here.


// Cloned by Rajat Lashkare on 2 Dec 2023 from World "Chat with GPT model" by Starter user 
// Please leave this clone trail here.


// talk to Hugging Face using its Inference API
// adapted from:
// https://huggingface.co/docs/api-inference/index
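//
// A typical request looks roughly like this (a sketch based on the docs above;
// the model id and prompt are only examples):
//   POST https://api-inference.huggingface.co/models/gpt2
//   Authorization: Bearer <access token>
//   Body: {"inputs": "The answer to the universe is"}
// A text model usually answers with JSON such as:
//   [{"generated_text": "The answer to the universe is ..."}]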


// ----------------- GLOBAL VARIABLES ----------------- //

const ENDPOINT = `https://api-inference.huggingface.co`; // the AI API endpoint

let API_TOKEN = "";            // the API token we need to talk to the model, set by the user

// These models will be shown in the dropdowns; more can be added here (see the example after the list)
const models = [{
  selectId: 'text-generation-model',
  models: [
    "gpt2",
    "bigscience/bloom",
    "distilgpt2",
    "tiiuae/falcon-7b-instruct"
  ]
}, {
  selectId: 'text-to-image-model',
  models: [
    "CompVis/stable-diffusion-v1-4",
    "runwayml/stable-diffusion-v1-5",
    "prompthero/openjourney",
    "Lykon/dreamshaper-8",
    "Guizmus/SDArt_UnmythicalCreatures",
    "xyn-ai/anything-v4.0",
  ]
}, {
  selectId: 'image-to-text-model',
  models: [
    "nlpconnect/vit-gpt2-image-captioning",
    "Salesforce/blip-image-captioning-large"
  ]
}, {
  selectId: 'image-classification-model',
  models: [
    "facebook/detr-resnet-50",
    "google/vit-base-patch16-224",
    "nateraw/vit-age-classifier",
    "Kaludi/food-category-classification-v2.0"
  ]
}, {
  selectId: 'text-classification-model',
  models: [
    "distilbert-base-uncased-finetuned-sst-2-english",
    "ProsusAI/finbert",
    "SamLowe/roberta-base-go_emotions"
  ]
}];
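
// For example, one more text-generation model could be offered by appending its Hub id
// to the matching list above (the id below is only an illustration):
//   models[0].models.push("EleutherAI/gpt-neo-125m");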


// ----------------- Response-time counter (loader) ----------------- //


let isLoading = false;         // whether we are loading the data
let clockIntervalId = null;    // the interval id for the clock

// Calculate elapsed time in seconds since startTime
function elapsedTime(startTime) {
  return (Date.now() - startTime) / 1000;
}

// Start the clock, called before the request is sent
function startLoader(clockElementId) {
  isLoading = true;

  if (clockElementId) {
    const startTime = Date.now();

    clockIntervalId = setInterval(() => {
      showElement(clockElementId, `(${elapsedTime(startTime).toFixed(2)}s)`);
    }, 10);
  }
}

// Stop the clock, called when the response is received
function stopLoader() {
  isLoading = false;
  if (clockIntervalId) clearInterval(clockIntervalId);
  clockIntervalId = null;
}
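
// Typical usage around a request (a sketch; the clock element id is an example):
//   startLoader("text-generation-clock");          // start ticking before the request
//   query(model, body, "application/json")
//     .then(response => { stopLoader(); /* render the result */ })
//     .catch(error => onError(statusId, error));   // onError() also stops the clock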


// set the API token
function setkey() {
  API_TOKEN = $("#apikey").val().trim();
  if (API_TOKEN.length > 0) {
    hideElement("token-container");
    showElement("token-activated");
  }
}

let activeSection = "";
// To open a tab
function openTab(evt, sectionId) {
  activeSection = sectionId;

  // get all children of tab-links-group and remove active class
  const tabLinks = document.getElementById("tab-links-group").children;
  for (let i = 0; i < tabLinks.length; i++) {
    tabLinks[i].classList.remove("active");
  }

  // get all children of tab-content-group and hide them
  const tabContents = document.getElementById("tab-content-group").children;
  for (let i = 0; i < tabContents.length; i++) {
    tabContents[i].style.display = "none";
  }

  // show the current tab, and add an "active" class to the button that opened the tab
  evt.currentTarget.classList.add("active");
  document.getElementById(sectionId).style.display = "block";

  const canvas = document.querySelector("#preview-canvas");
  if (canvas) {
    if (sectionId === "sec-image-classification") {
      canvas.style.display = "block";
    } else {
      canvas.style.display = "none";
    }
  }
}

// To open the model page on Hugging Face
function openModelOnHF(selectId) {
  const model = getInputValue(selectId);
  if (model && model.trim().length > 0) {
    // using an <a> tag since window.location is blocked on Ancient Brain
    const a = document.createElement("a");
    a.href = `https://huggingface.co/${model}`;
    a.target = "_blank";
    a.click();
  }
}

// to get the model URL
function getModelUrl(model) {
  return ENDPOINT + "/models/" + model;
}

// to send the query to Hugging Face
async function query(model, data, contentType) {
  let headers = {
    "Authorization": `Bearer ${API_TOKEN}`
  };
  if (contentType) {
    headers["Content-Type"] = contentType;
  }

  const response = await fetch(getModelUrl(model), {
    method: "POST",
    headers: headers,
    body: data,
  });
  if (!response.ok) {
    const responseText = await response.text();
    throw new Error(responseText);
  }
  return response;
}
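
// Example (a sketch): sending a JSON payload to a text model and reading the reply.
//   const res  = await query("gpt2", JSON.stringify({ inputs: "Hello" }), "application/json");
//   const json = await res.json();   // e.g. [{ generated_text: "Hello ..." }]
// For the image tasks below, the raw file bytes are passed as `data` with no Content-Type header.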

// to handle errors from a query
function onError(statusId, error) {
  stopLoader();
  console.log(error);
  if (error.message && error.message.length > 0) {
    try {
      const errorJson = JSON.parse(error.message);
      let errString = errorJson.error || error.message;

      if (errString.includes("is currently loading") && errorJson.estimated_time) {
        errString += ". Please try again in " + errorJson.estimated_time.toFixed(1) + " seconds.";
      }

      showElement(statusId, createAPIErrorElement(errString));
    } catch (e) {
      // the error body was not JSON, so show it as-is
      showElement(statusId, createAPIErrorElement(error.message));
    }
  } else {
    showElement(statusId, createErrorElement("Something went wrong! Please check the network tab."));
  }
}
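
// The Inference API usually reports errors as a JSON body, for example:
//   { "error": "Model gpt2 is currently loading", "estimated_time": 20.0 }
// (a sketch of an observed shape, not a guaranteed schema), which is why the error
// message is parsed as JSON above.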

// to clear the text
function clearText(id) {
  $("#" + id).val("");
}

// to clear the image
function clearImage(id) {
  if (isLoading) return;
  clearPreviewCanvas();
  $("#" + id).attr("src", "");
  hideElement(id);
}

function clearPreviewCanvas() {
  // query canvas and remove it if exists
  if (activeSection === "sec-image-classification") {
    const canvas = document.querySelector("#preview-canvas");
    if (canvas) canvas.remove();
  }
}

// to show an element, optionally setting its HTML content
function showElement(id, content) {
  if (id) {
    $("#" + id).show();
    if (content) $("#" + id).html(content);
  }
}

// to hide an element
function hideElement(id) {
  if (id) $("#" + id).hide();
}

// to get input value
function getInputValue(id) {
  return $("#" + id).val();
}

// to set input value
function setInputValue(id, val) {
  return $("#" + id).val(val);
}

// to get checkbox value
function getCheckboxValue(id) {
  return $("#" + id).prop("checked");
}

// to set checkbox value
function setCheckboxValue(id, val) {
  return $("#" + id).prop("checked", val);
}

// to create error element
function createErrorElement(message) {
  return `<span style='color: red;'>ERROR: ${message}</span>`;
}

// to create API error element
function createAPIErrorElement(message) {
  return `<span style='color: red;'>API Responded: ${message}</span>`;
}

// to copy the slider value into its paired number input (used for temperature and guidance scale)
function updateTemperature(event, id) {
  const value = event.target.value;
  setInputValue(id, value);
}

// to select and preview an image
function chooseImage(event, imgId) {
  const file = event.target.files[0];
  if (!file) return; // the user cancelled the file dialog

  const reader = new FileReader();
  reader.onload = function (e) {
    $("#" + imgId).attr("src", e.target.result);
    clearPreviewCanvas();
    showElement(imgId);
  }
  reader.readAsDataURL(file);
}

// to show/hide element on checkbox checked
function hideOnChecked(event, id) {
  const checked = event.target.checked;
  if (checked) {
    hideElement(id);
  } else {
    showElement(id);
  }
}

// send the query and display the result for text generation
function queryTextGeneration() {
  // Check to avoid duplicate requests
  if (isLoading) return;


  const statusId = "text-generation-status";

  if (!API_TOKEN) {
    showElement(statusId, createErrorElement("ACCESS TOKEN NOT SET!"));
    return;
  }

  const input = getInputValue("text-generation-input");
  if (!input || input.trim().length === 0) {
    showElement(statusId, createErrorElement("PLEASE ENTER INPUT!"));
    return;
  }

  const model = getInputValue("text-generation-model");
  if (!model || model.trim().length === 0) {
    showElement(statusId, createErrorElement("MODEL NOT SELECTED!"));
    return;
  }

  const return_full_text = getCheckboxValue("text-generation-full-text");
  const temperature = getInputValue("text-generation-temperature");
  const data = {
    inputs: input,
    parameters: {
      return_full_text: return_full_text,
      temperature: parseFloat(temperature)
    }
  }

  showElement(statusId, "Loading, please wait...");
  startLoader("text-generation-clock");

  query(model, JSON.stringify(data), "application/json")
    .then(response => response.json())
    .then(response => {
      stopLoader();
      hideElement(statusId);

      showElement("text-generation-output", response[0].generated_text);
    }).catch((error) => onError(statusId, error));
}

// send the query and display the result for text classification
function queryTextClassification() {
  // Check to avoid duplicate requests
  if (isLoading) return;

  const statusId = "text-classification-status";

  if (!API_TOKEN) {
    showElement(statusId, createErrorElement("ACCESS TOKEN NOT SET!"));
    return;
  }

  const input = getInputValue("text-classification-input");
  if (!input || input.trim().length === 0) {
    showElement(statusId, createErrorElement("PLEASE ENTER INPUT!"));
    return;
  }

  const model = getInputValue("text-classification-model");
  if (!model || model.trim().length === 0) {
    showElement(statusId, createErrorElement("MODEL NOT SELECTED!"));
    return;
  }

  const data = {
    inputs: input
  }

  showElement(statusId, "Loading, please wait...");
  startLoader("text-classification-clock");

  query(model, JSON.stringify(data), "application/json")
    .then(response => response.json())
    .then(response => {

      stopLoader();
      hideElement(statusId);

      const tco = $("#text-classification-output");
      tco.html("");
      tco.append(`<label class="mt-2">Classification Output</label>`);

      if (response && response[0]) {
        response[0].forEach((item) => {
          tco.append(`
        <div class="flex-col flex-item mt-2">
            <div style="border: 1px solid black; border-radius: 4px;">
                <div class="bg-purple" style="height:4px;width:${Math.floor(item.score * 100)}%;"></div>
            </div>
            <div class="mt-1">
                <span class="mt-1 font-size-1">${item.label} - </span>
                <span class="mt-1 font-size-1">Score: ${item.score}</span>
            </div>
        </div>
        `);
        });
      }

    }).catch((error) => onError(statusId, error));
}
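
// Text-classification models usually reply with a nested array of label/score pairs, e.g.
//   [[ { "label": "POSITIVE", "score": 0.99 }, { "label": "NEGATIVE", "score": 0.01 } ]]
// (a sketch of an observed shape, not a guaranteed schema), hence the loop over response[0] above.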

// send the query and display the result for image classification
async function queryImageClassification() {
  // Check to avoid duplicate requests
  if (isLoading) return;

  const statusId = "image-classification-status";

  if (!API_TOKEN) {
    showElement(statusId, createErrorElement("ACCESS TOKEN NOT SET!"));
    return;
  }

  const model = getInputValue("image-classification-model");
  if (!model || model.trim().length === 0) {
    showElement(statusId, createErrorElement("MODEL NOT SELECTED!"));
    return;
  }

  // Read file from"image-classification-input"
  const file = document.getElementById("image-classification-input").files[0];
  if (!file) {
    showElement(statusId, createErrorElement("PLEASE SELECT AN IMAGE!"));
    return;
  }

  showElement(statusId, "Loading, please wait...");
  startLoader("image-classification-clock");

  const reader = new FileReader();
  reader.onload = function (event) {
    const imageData = event.target.result;
    query(model, imageData)
      .then(response => response.json())
      .then(response => {
        stopLoader();
        hideElement(statusId);

        const tco = $("#image-classification-output");
        tco.html("");
        tco.append(`<label class="mt-2">Classification Output</label>`);

        const image = document.getElementById('image-classification-preview');

        // Create a canvas on top of the image
        const canvas = document.createElement('canvas');
        canvas.id = 'preview-canvas';
        canvas.width = image.width;
        canvas.height = image.height;
        canvas.style.position = 'absolute';
        canvas.style.left = image.offsetLeft + 'px';
        canvas.style.top = image.offsetTop + 'px';

        // Append the canvas to the body
        document.body.appendChild(canvas);

        // Get the 2D drawing context
        const context = canvas.getContext('2d');

        if (response && response instanceof Array && response.length > 0) {

          // Sort the response by box.xmin
          if (response[0].box) {
            response.sort((a, b) => {
              return a.box.xmin - b.box.xmin;
            });
          }

          response.forEach((item) => {
            // Generate a random rgba color
            const color = `rgba(${Math.floor(Math.random() * 255)},${Math.floor(Math.random() * 255)},${Math.floor(Math.random() * 255)},1)`;

            // Draw a box on the image in the generated color
            if (item.box) {
              context.beginPath();
              context.rect(item.box.xmin, item.box.ymin, item.box.xmax - item.box.xmin, item.box.ymax - item.box.ymin);
              context.lineWidth = 2;
              context.strokeStyle = color;
              context.stroke();
            }

            tco.append(`
          <div class="flex-col flex-item mt-2">
              <div style="border-radius: 4px;">
                  <div style="height:4px;width:${Math.floor(item.score * 100)}%;background-color:${color};"></div>
              </div>
              <div class="mt-1">
                  <span class="mt-1 font-size-1">${item.label} - </span>
                  <span class="mt-1 font-size-1">Score: ${item.score}</span>
              </div>
          </div>
          `);
          });
        }

      }).catch((error) => onError(statusId, error));
  };
  reader.readAsArrayBuffer(file);
}
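
// Image-classification models usually reply with a flat array of label/score pairs, e.g.
//   [ { "label": "tabby cat", "score": 0.92 }, ... ]
// while object-detection models such as facebook/detr-resnet-50 also include a bounding box, e.g.
//   { "label": "cat", "score": 0.97, "box": { "xmin": 10, "ymin": 20, "xmax": 200, "ymax": 180 } }
// (sketches of observed shapes, not a guaranteed schema). That is why boxes are only drawn
// above when item.box is present.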

// send the query and display the result for image to text
async function queryImageToText() {
  // Check to avoid duplicate requests
  if (isLoading) return;

  const statusId = "image-to-text-status";

  if (!API_TOKEN) {
    showElement(statusId, createErrorElement("ACCESS TOKEN NOT SET!"));
    return;
  }

  const model = getInputValue("image-to-text-model");
  if (!model || model.trim().length === 0) {
    showElement(statusId, createErrorElement("MODEL NOT SELECTED!"));
    return;
  }

  // Read file from"image-to-text-input"
  const file = document.getElementById("image-to-text-input").files[0];
  if (!file) {
    showElement(statusId, createErrorElement("PLEASE SELECT AN IMAGE!"));
    return;
  }

  showElement(statusId, "Loading, please wait...");
  startLoader("image-to-text-clock");

  const reader = new FileReader();
  reader.onload = function (event) {
    const imageData = event.target.result;
    query(model, imageData)
      .then(response => response.json())
      .then(response => {

        stopLoader();
        hideElement(statusId);

        showElement("image-to-text-output", response[0].generated_text);
      }).catch((error) => onError(statusId, error));
  };
  reader.readAsArrayBuffer(file);
}
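
// Image-captioning models usually reply in the same shape as text generation, e.g.
//   [{ "generated_text": "a cat sitting on a sofa" }]
// (a sketch of an observed shape, not a guaranteed schema), hence response[0].generated_text above.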

// send the query and display the result for text to image
async function queryTextToImage() {
  // Check to avoid duplicate requests
  if (isLoading) return;

  const statusId = "text-to-image-status";

  if (!API_TOKEN) {
    showElement(statusId, createErrorElement("ACCESS TOKEN NOT SET!"));
    return;
  }

  const model = getInputValue("text-to-image-model");
  if (!model || model.trim().length === 0) {
    showElement(statusId, createErrorElement("MODEL NOT SELECTED!"));
    return;
  }

  const input = getInputValue("text-to-image-input");
  if (!input || input.trim().length === 0) {
    showElement(statusId, createErrorElement("PLEASE ENTER INPUT!"));
    return;
  }

  let parameters = {}

  const input_neg = getInputValue("text-to-image-input-neg").trim();
  if (input_neg && input_neg.length > 0) {
    parameters.negative_prompt = input_neg;
  }

  const guidance = getCheckboxValue("text-to-image-use-default");
  if (!guidance) {
    const input_guidance = getInputValue("text-to-image-guidance").trim();
    if (input_guidance && input_guidance.length > 0) {
      parameters.guidance_scale = parseFloat(input_guidance);
    }
  }

  const data = {
    inputs: input,
    parameters: Object.keys(parameters).length > 0 ? parameters : undefined
  }


  showElement(statusId, "Loading, please wait...");
  startLoader("text-to-image-clock");

  query(model, JSON.stringify(data), "application/json")
    .then(response => response.blob())
    .then((blob) => {
      stopLoader();
      hideElement(statusId);

      const imageUrl = URL.createObjectURL(blob);

      // Creating the structure below:
      // <div>
      //     <hr>
      //     <input type="text" class="input-text full-width mb-1 mt-1" readonly>
      //     <div class="mb-1">Generated on: date</div>
      //     <img class="mb-1" src="" alt="Image Preview">
      // </div>

      const divEl = document.createElement("div");
      const hrEl = document.createElement("hr");
      const inputEl = document.createElement("input");
      const divEl2 = document.createElement("div");
      const imgEl = document.createElement("img");

      divEl.appendChild(hrEl);
      divEl.appendChild(inputEl);
      divEl.appendChild(divEl2);
      divEl.appendChild(imgEl);

      inputEl.setAttribute("type", "text");
      inputEl.setAttribute("class", "input-text full-width mb-1 mt-1");
      inputEl.setAttribute("readonly", "");
      inputEl.value = input;

      divEl2.setAttribute("class", "mb-1");
      divEl2.innerHTML = "Generated on: " + new Date().toLocaleString();

      imgEl.setAttribute("class", "mb-1");
      imgEl.setAttribute("src", imageUrl);
      imgEl.setAttribute("alt", "Image Preview");

      $("#text-to-image-output").prepend(divEl);
    }).catch((error) => onError(statusId, error));
}
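
// Note: text-to-image models return raw image bytes rather than JSON, which is why the
// response above is read as a Blob and displayed via URL.createObjectURL().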


document.write ( `
    <!-- Creating required styles -->
    <style>
        body {
            font-family: Arial, Helvetica, sans-serif;
            display: flex;
            flex-direction: column;
            align-items: center;
            margin: 0;
            background-color: antiquewhite !important;
        }

        .label-tag {
            padding: 5px 10px;
            border-radius: 5px;
        }

        .bg-blue {
            background-color: #2196F3;
        }

        .bg-white {
            background-color: white;
        }

        .bg-green {
            background-color: #4CAF50;
        }

        .bg-purple {
            background-color: #673ab7;
        }

        .bg-yellow {
            background-color: #ffeb3b;
        }

        .bg-brown {
            background-color: #7c4835;
        }

        .col-white {
            color: white;
        }

        .a-no-underline {
            text-decoration: none;
        }

        .inline-block {
            display: inline-block;
        }

        .hidden {
            display: none;
        }

        .block {
            display: block;
        }

        .pos-bottom {
            position: relative;
            bottom: 0;
        }

        .flex-col {
            display: flex;
            flex-direction: column;
        }

        .flex-row {
            display: flex;
            flex-direction: row;
            align-items: center;
        }

        .flex-wrap {
            flex-wrap: wrap;
        }

        .flex-item {
            flex: 1;
        }

        /* Creating margin and padding styles for layout purpose */
        .mr-1 {
            margin-right: 8px;
        }

        .ml-1 {
            margin-left: 8px;
        }

        .mt-1 {
            margin-top: 8px;
        }

        .mb-1 {
            margin-bottom: 8px;
        }

        .mr-2 {
            margin-right: 16px;
        }

        .ml-2 {
            margin-left: 16px;
        }

        .mt-2 {
            margin-top: 16px;
        }

        .mb-2 {
            margin-bottom: 16px;
        }

        .pt-2 {
            padding-top: 16px;
        }

        .pb-2 {
            padding-bottom: 16px;
        }

        .pl-2 {
            padding-left: 16px;
        }

        .pr-2 {
            padding-right: 16px;
        }

        .full-width {
            width: 100%;
        }

        .select-width {
            flex: 1;
            max-width: 420px;
        }

        .slider-width {
            flex: 1;
            max-width: 420px;
        }

        .slider-number-width {
            flex: 1;
            max-width: 80px;
        }

        .bold {
            font-weight: bold;
        }

        .font-size-1 {
            font-size: 12px;
        }

        .font-size-4 {
            font-size: 48px;
        }

        .btn-primary {
            background-color: rgb(243, 107, 33);
            color: white;
            border: none;
            border-radius: 5px;
            padding: 5px 10px;
            cursor: pointer;
        }

        .btn-primary:hover {
            background-color: rgb(243, 156, 33);
        }

        .btn-secondary {
            background-color: rgb(156, 139, 114);
            color: white;
            border: none;
            border-radius: 5px;
            padding: 5px 10px;
            cursor: pointer;
        }

        .btn-secondary:hover {
            background-color: rgb(97, 88, 73);
        }

        .hf-redirect-btn {
            width: 250px;
        }

        .input-text {
            padding: 5px 10px;
            border-radius: 5px;
            border: 1px solid #ccc;
        }

        .input-textarea {
            height: 100px;
            padding: 5px 10px;
            border-radius: 5px;
            border: 1px solid #ccc;
        }

        .model-container {
            border: 1px solid #ccc;
            border-radius: 5px;
            padding: 8px 32px;
            margin: 16px 10vw;
            align-items: stretch;
        }

        .tablinks {
            background-color: transparent;
            color: white;
            border: none;
            outline: none;
            cursor: pointer;
            padding: 10px 20px;
            font-weight: bold;
        }

        .tablinks:hover {
            background-color: rgb(243, 107, 33);
        }

        .active {
            background-color: rgb(243, 156, 33);
        }
    </style>

    <h1>
        <span class="bg-white label-tag" style="border-radius: 4px 0px 0px 4px">
            Hugging Face 🤗
        </span>
        <a target="_blank" class="a-no-underline" href="https://ancientbrain.com/world.php?world=8185996078">
            <span class="bg-blue col-white label-tag" style="border-radius: 0px 4px 4px 0px">
                Model Explorer
            </span>
        </a>
    </h1>


    <div class="mb-2">
        <span id="token-activated" class="bg-green col-white label-tag hidden">Access Token Set!</span>
        <div id="token-container">
            <span class="inline-block">Enter Access Token:</span>
            <input class="mr-2 ml-2 input-text" style='width:25vw;' maxlength='2000' NAME="apikey" id="apikey" VALUE=''>
            <button class="btn-primary" onclick='setkey();' class=ab-normbutton>Set API key</button>
        </div>
    </div>

    <h2 style="padding-bottom: 8px;">AI Models</h2>

    <!-- 
        Tabs for switching between model types
        Adapted from: https://www.w3schools.com/howto/howto_js_tabs.asp
    -->
    <div id="tab-links-group" class="bg-blue flex-row">
        <button id="sec-text-generation-tablink" class="tablinks" onclick="openTab(event, 'sec-text-generation')">TEXT
            GENERATION</button>
        <button id="sec-text-to-image-tablink" class="tablinks" onclick="openTab(event, 'sec-text-to-image')">TEXT TO
            IMAGE</button>
        <button id="sec-image-to-text-tablink" class="tablinks" onclick="openTab(event, 'sec-image-to-text')">IMAGE TO
            TEXT</button>
        <button id="sec-image-classification-tablink" class="tablinks"
            onclick="openTab(event, 'sec-image-classification')">IMAGE CLASSIFICATION</button>
        <button id="sec-text-classification-tablink" class="tablinks"
            onclick="openTab(event, 'sec-text-classification')">TEXT CLASSIFICATION</button>
    </div>

    <!-- Model containers for each model type -->
    <div id="tab-content-group" class="flex-item flex-col full-width">
        <section id="sec-text-generation" class="model-container hidden">
            <h3>Text Generation task</h3>
            <div><span class="bold">Category: </span><span
                    class="label-tag bg-purple col-white font-size-1 bold">NATURAL LANGUAGE PROCESSING</span></div>
            <p>Text generation is a natural language processing (NLP) task that involves creating new text, often based
                on existing data or prompts. It is a versatile tool that can be used for a variety of purposes,
                including machine translation, creative writing, and chatbots.</p>
            <p>There are several different approaches to text generation, but they all share the goal of producing text
                that is both grammatically correct and semantically meaningful. Some common techniques include
                statistical language modeling, recurrent neural networks, and transformers.</p>
            <form>
                <div class="flex-col flex-wrap">
                    <button type="button" onclick="openModelOnHF('text-generation-model');" class="btn-secondary mb-1 hf-redirect-btn">View selected model on Hugging Face</button>
                    <div class="flex-row">
                        <label for="text-generation-model">Select Model:</label>
                        <select id="text-generation-model" class="ml-2 input-text select-width"></select>
                        <input type="checkbox" name="return_full_text" id="text-generation-full-text" class="ml-2 mr-1"
                            checked>
                        <label for="text-generation-full-text">Return full text</label>
                    </div>
                    <div class="flex-col">
                        <p>Select the temperature of the sampling operation:</p>
                        <div class="flex-row">
                            <input class="slider-width" type="range" min="0" max="100" value="1" class="flex-item"
                                oninput="updateTemperature(event, 'text-generation-temperature');">
                            <input class="slider-number-width ml-2 input-text" type="number"
                                id="text-generation-temperature" value="1" readonly>
                        </div>
                        <p>(1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to
                            uniform probability.)</p>
                    </div>
                    <div class="flex-col mt-2">
                        <label class="mb-1" for="text-generation-input">Input Text</label>
                        <textarea id="text-generation-input" name="input" class="input-textarea"></textarea>
                    </div>
                    <div class="flex-rowl mt-2">
                        <button type="button" class="btn-secondary"
                            onclick='clearText("text-generation-input");'>Clear</button>
                        <button type="button" class="btn-primary" onclick='queryTextGeneration();'>Generate</button>
                        <span class="ml-2" id="text-generation-clock"></span>
                        <span class="ml-2" id="text-generation-status"></span>
                    </div>
                    <div class="flex-col mt-2">
                        <label class="mb-1" for="text-generation-output">Output Text</label>
                        <textarea id="text-generation-output" class="input-textarea" readonly></textarea>
                    </div>
                </div>
            </form>
        </section>
        <section id="sec-text-to-image" class="model-container hidden">
            <h3>Text to Image</h3>
            <div><span class="bold">Category: </span><span
                    class="label-tag bg-brown col-white font-size-1 bold">MULTIMODAL</span></div>
            <p>A text-to-image model, also known as an image generator, can create images from text descriptions. These
                models are trained on large datasets of text and images and can be used for various tasks like
                generating illustrations or concept art.</p>
            <form>
                <div class="flex-col flex-wrap">
                    <button type="button" onclick="openModelOnHF('text-to-image-model');" class="btn-secondary mb-1 hf-redirect-btn">View selected model on Hugging Face</button>
                    <div class="flex-row">
                        <label for="text-to-image-model">Select Model:</label>
                        <select id="text-to-image-model" class="ml-2 input-text select-width"></select>
                    </div>
                    <div class="flex-col mt-2">
                        <label class="mb-1" for="text-to-image-input">Input Prompt</label>
                        <input type="text" id="text-to-image-input" name="input" class="input-text full-width">
                        <label class="mb-1 mt-1" for="text-to-image-input-neg">Negative Prompt</label>
                        <input type="text" id="text-to-image-input-neg" name="input" class="input-text full-width">
                    </div>
                    <div class="flex-col">
                        <div class="flex-row">
                            <p>Guidance scale:</p>
                            <input type="checkbox" name="return_use_default" id="text-to-image-use-default"
                                class="ml-2 mr-1" onchange="hideOnChecked(event, 'text-to-image-guidance-slider');">
                            <label for="text-to-image-use-default">Use default</label>
                            <div id="text-to-image-guidance-slider" class="flex-row flex-item">
                                <div
                                    style="background-color: rgb(120, 120, 120); width: 2px; height: 16px; margin: 0 16px;">
                                </div>
                                <input class="slider-width" type="range" min="0" max="50" value="6" step="0.1"
                                    class="flex-item" oninput="updateTemperature(event, 'text-to-image-guidance');">
                                <input class="slider-number-width ml-2 input-text" type="number"
                                    id="text-to-image-guidance" value="6" readonly>
                            </div>
                        </div>
                    </div>
                    <div class="flex-rowl mt-2">
                        <button type="button" class="btn-secondary"
                            onclick='clearText("text-to-image-input");'>Clear</button>
                        <button type="button" class="btn-primary" onclick='queryTextToImage();'>Generate</button>
                        <span class="ml-2" id="text-to-image-clock"></span>
                        <span class="ml-2" id="text-to-image-status"></span>
                    </div>
                    <div class="mt-2">Outputs</div>
                    <div class="flex-col mt-2" id="text-to-image-output"></div>
                </div>
            </form>
        </section>
        <section id="sec-image-to-text" class="model-container hidden">
            <h3>Image to Text</h3>
            <div><span class="bold">Category: </span><span
                    class="label-tag bg-brown col-white font-size-1 bold">MULTIMODAL</span></div>
            <p>An image-to-text model generates text descriptions of images, trained on large datasets of image-text
                pairs. These models are used for tasks like alt text, image search optimization, and image-aware
                chatbots.</p>
            <form>
                <div class="flex-col flex-wrap">
                    <button type="button" onclick="openModelOnHF('image-to-text-model');" class="btn-secondary mb-1 hf-redirect-btn">View selected model on Hugging Face</button>
                    <div class="flex-row">
                        <label for="image-to-text-model">Select Model:</label>
                        <select id="image-to-text-model" class="ml-2 input-text select-width"></select>
                    </div>
                    <div class="flex-col mt-2">
                        <label class="mb-1" for="image-to-text-input">Choose Image</label>
                        <input type="file" accept="image/*" id="image-to-text-input" name="image-input"
                            onchange="chooseImage(event, 'image-to-text-preview');">
                        <div>
                            <img id="image-to-text-preview" class="mt-2 hidden" src="" alt="Image Preview">
                        </div>
                    </div>
                    <div class="flex-rowl mt-2">
                        <button type="reset" class="btn-secondary"
                            onclick='clearImage("image-to-text-preview");'>Clear</button>
                        <button type="button" class="btn-primary" onclick='queryImageToText();'>Generate</button>
                        <span class="ml-2" id="image-to-text-clock"></span>
                        <span class="ml-2" id="image-to-text-status"></span>
                    </div>
                    <div class="flex-col mt-2">
                        <label class="mb-1" for="image-to-text-output">Output Text</label>
                        <textarea id="image-to-text-output" class="input-textarea" readonly></textarea>
                    </div>
                </div>
            </form>
        </section>
        <section id="sec-image-classification" class="model-container hidden">
            <h3>Image Classification task</h3>
            <div><span class="bold">Category: </span><span class="label-tag bg-yellow font-size-1 bold">COMPUTER
                    VISION</span></div>
            <p>Image classification is a fundamental task in computer vision that involves assigning a predefined
                category or label to an image. It plays a crucial role in various applications, including object
                recognition, scene understanding, and medical image analysis.</p>
            <p>Image classification models are trained on large datasets of labeled images, enabling them to extract
                meaningful features and patterns from visual data. Convolutional neural networks (CNNs) are the most
                widely used architecture for image classification, as they excel at capturing spatial relationships and
                hierarchies within images.</p>
            <form>
                <div class="flex-col flex-wrap">
                    <button type="button" onclick="openModelOnHF('image-classification-model');" class="btn-secondary mb-1 hf-redirect-btn">View selected model on Hugging Face</button>
                    <div class="flex-row">
                        <label for="image-classification-model">Select Model:</label>
                        <select id="image-classification-model" class="ml-2 input-text select-width"></select>
                    </div>
                    <div class="flex-col mt-2">
                        <label class="mb-1" for="image-classification-input">Choose Image</label>
                        <input type="file" accept="image/*" id="image-classification-input" name="image-input"
                            onchange="chooseImage(event, 'image-classification-preview');">
                        <div>
                            <img id="image-classification-preview" class="mt-2 hidden" src="" alt="Image Preview">
                        </div>
                    </div>
                    <div class="flex-rowl mt-2">
                        <button type="reset" class="btn-secondary"
                            onclick='clearImage("image-classification-preview");'>Clear</button>
                        <button type="button" class="btn-primary"
                            onclick='queryImageClassification();'>Generate</button>
                        <span class="ml-2" id="image-classification-clock"></span>
                        <span class="ml-2" id="image-classification-status"></span>
                    </div>
                    <div class="flex-col mt-2" id="image-classification-output"></div>
                </div>
            </form>
        </section>
        <section id="sec-text-classification" class="model-container hidden">
            <h3>Text Classification task</h3>
            <div><span class="bold">Category: </span><span
                    class="label-tag bg-purple col-white font-size-1 bold">NATURAL LANGUAGE PROCESSING</span></div>
            <p>Text classification is a natural language processing (NLP) task that involves assigning a predefined
                category to a piece of text. It is a fundamental task in NLP with a wide range of applications, such as
                sentiment analysis, topic modeling, and spam filtering.</p>
            <p>There are several different approaches to text classification, but they all share the goal of accurately
                assigning categories to text. Some common techniques include rule-based systems, machine learning
                algorithms, and deep learning models.</p>
            <form>
                <div class="flex-col flex-wrap">
                    <button type="button" onclick="openModelOnHF('text-classification-model');" class="btn-secondary mb-1 hf-redirect-btn">View selected model on Hugging Face</button>
                    <div class="flex-row">
                        <label for="text-classification-model">Select Model:</label>
                        <select id="text-classification-model" class="ml-2 input-text select-width"></select>
                    </div>
                    <div class="flex-col mt-2">
                        <label class="mb-1" for="text-classification-input">Input Text</label>
                        <textarea id="text-classification-input" name="input" class="input-textarea"></textarea>
                    </div>
                    <div class="flex-rowl mt-2">
                        <button type="button" class="btn-secondary"
                            onclick='clearText("text-classification-input");'>Clear</button>
                        <button type="button" class="btn-primary" onclick='queryTextClassification();'>Generate</button>
                        <span class="ml-2" id="text-classification-clock"></span>
                        <span class="ml-2" id="text-classification-status"></span>
                    </div>
                    <div class="flex-col mt-2" id="text-classification-output"></div>
                </div>
            </form>
        </section>
    </div>

    <footer>
        <h4>Things to remember:</h4>
        <p>
            <i>
                <b>Note 1:</b> Sometimes the API will throw an error that the <b>model is currently loading</b>. This is normal
                for fee usage. Please try again in sometime.
            </i>
        </p>
        <p>
            <i>
                <b>Note 2:</b> If there is an <b>unknown error</b>. Please check the network tab for response.</br>If response indicates CUDA out of memory warning then try to reduce the input prompt or image size.
            </i>
        </p>
        <p>
            <i>
                <b>Note 3:</b> As always, be warned that GPT replies are often completely inaccurate. All LLM systems
                <a href="https://www.google.com/search?q=llm+hallucination"> "hallucinate"</a>.
                It is how they work.
            </i>
        </p>
    </footer>
` );


// ----------------- Loading Data ----------------- //


// Adding models to dropdowns dynamically
models.forEach((model) => {
  const selectId = model.selectId;
  const models = model.models;

  // Add options to select
  models.forEach((model) => {
    $("#" + selectId).append(`<option value="${model}">${model}</option>`);
  });
});

// Setting example values to inputs
setInputValue("text-generation-input", "The answer to the universe is");
setInputValue("text-to-image-input", "A high tech solarpunk utopia in the Amazon rainforest");
setInputValue("text-classification-input", "This is the best website I have ever seen.");

// Setting default values
setCheckboxValue("text-to-image-use-default", true);
hideElement("text-to-image-guidance-slider");

// Trigger event click of #sec-text-generation-tablink to open the first tab on page load
document.getElementById("sec-text-generation-tablink").click();