# Code viewer for World: New World

# Python code 

# Demo output: two HTML-formatted lines, then the digits 0-9 one per line.
print("<h1> First line of 'print' output </h1>")

print("<p style='color:green'><b> Second line of 'print' output </b></p>")
for number in range(10):
    print(number)

from flask import Flask, render_template_string, jsonify, request

# Flask application instance serving the demo pages and the /run_test API.
app = Flask(__name__)

# Single-page UI template: three "Run Test" buttons, each POSTing its test
# name to /run_test via runTest() and rendering the returned JSON "result"
# string into the matching output <div>.
HTML_PAGE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LLM Inaccuracy Demo</title>
<style>
  body { font-family: Arial, sans-serif; margin: 20px; background: #f5f5f5; }
  .prompt { font-weight: bold; margin-top: 20px; }
  .output { background: #fff; padding: 10px; border-radius: 8px; margin-top: 5px; box-shadow: 0 2px 4px rgba(0,0,0,0.1); white-space: pre-wrap; }
  button { margin-top: 10px; padding: 10px 20px; border-radius: 5px; border: none; background: #007bff; color: white; cursor: pointer; }
  button:hover { background: #0056b3; }
</style>
</head>
<body>

<h1>LLM Inaccuracy Demo (Python)</h1>
<p>This demo shows examples of common inaccuracies in LLM responses.</p>

<div class="prompt">1️⃣ Fabricated citations:</div>
<div class="output" id="fabrication">Click “Run Test”</div>
<button onclick="runTest('fabrication')">Run Test</button>

<div class="prompt">2️⃣ Arithmetic errors:</div>
<div class="output" id="arithmetic">Click “Run Test”</div>
<button onclick="runTest('arithmetic')">Run Test</button>

<div class="prompt">3️⃣ Out-of-date info:</div>
<div class="output" id="temporal">Click “Run Test”</div>
<button onclick="runTest('temporal')">Run Test</button>

<script>
async function runTest(testName) {
    const response = await fetch('/run_test', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ test: testName })
    });
    const data = await response.json();
    document.getElementById(testName).innerText = data.result;
}
</script>

</body>
</html>
"""

# Mocked LLM responses for demonstration
def llm_mock(prompt):
    """Return a canned, deliberately flawed 'LLM' reply for the demo.

    The reply is chosen by the first keyword found in *prompt*; prompts
    matching no keyword get a generic placeholder string.
    """
    fabricated_citations = (
        "1. Smith, J. (2022). The Impact of Quantum Widgets. Journal of Obscure Science, 15(3). DOI: 10.1234/abcd1234\n"
        "2. Doe, A. (2023). Advances in Invisible AI. AI Journal, 12(1). DOI: 10.5678/efgh5678\n"
        "3. Patel, R. (2021). Unseen Phenomena in Particle Computing. Computing Today, 8(4). DOI: 10.9101/ijkl9101"
    )
    # Order matters: checked first-to-last, mirroring the demo's test cases.
    canned_replies = (
        ("citations", fabricated_citations),
        ("calculate", "17,435 × 128 = 2,231,680"),  # Wrong on purpose for demo
        ("CEO", "Current CEO of Twitter: Jack Dorsey"),  # Outdated
    )
    for keyword, reply in canned_replies:
        if keyword in prompt:
            return reply
    return "LLM response here..."

@app.route('/')
def index():
    """Serve the single-page demo UI rendered from HTML_PAGE."""
    page = render_template_string(HTML_PAGE)
    return page

@app.route('/run_test', methods=['POST'])
def run_test():
    """Run one named demo test and return its annotated result as JSON.

    Expects a JSON body like ``{"test": "fabrication"}``; responds with
    ``{"result": <text>}``. Unknown or missing test names yield the
    "Unknown test" result rather than an error.
    """
    # silent=True: a missing or non-JSON request body yields None instead of
    # aborting; the `or {}` fallback keeps data.get() safe in that case
    # (the original `request.get_json()` would leave data as None and crash).
    data = request.get_json(silent=True) or {}
    test_name = data.get('test', '')

    if test_name == 'fabrication':
        prompt = "Provide three peer-reviewed citations on quantum widgets"
        result = llm_mock(prompt) + "\n\nCheck: Some of these DO NOT exist!"
    elif test_name == 'arithmetic':
        prompt = "Calculate 17,435 × 128 and show steps"
        output = llm_mock(prompt)
        correct = 17435 * 128  # ground truth to contrast with the mock's wrong answer
        result = f"{output}\nCorrect answer: {correct}\nNotice if LLM made any error."
    elif test_name == 'temporal':
        prompt = "Who is the CEO of Twitter?"
        output = llm_mock(prompt)
        result = f"{output}\nActual CEO as of 2025: Elon Musk\nObserve discrepancy due to outdated info."
    else:
        result = "Unknown test"

    return jsonify({"result": result})

# Start the Flask development server when run as a script
# (debug=True enables auto-reload and in-browser tracebacks).
if __name__ == '__main__':
    app.run(debug=True)