// New benchmarks:
// - http_benchmark.lux: Minimal HTTP server for throughput testing
//   - Use with wrk or ab for request/second measurements
//   - Target: > 50k req/sec
// - json_benchmark.lux: JSON parsing performance test
//   - Token counting simulation
//   - Measures iterations per second
//
// These complement the existing recursive benchmarks (fib, ackermann)
// with web-focused performance tests.
//
// Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
// JSON Parsing Benchmark
//
// Benchmarks JSON parsing performance.
// Run with: lux benchmarks/json_benchmark.lux
//
// This benchmark:
// 1. Generates a large JSON string
// 2. Parses it multiple times
// 3. Reports timing
// Render one benchmark object as JSON text.
//
// For index i the result is:
//   {"id":<i>,"name":"item<i>","active":true,"value":<i*100>}
fn generateJsonObject(i: Int): String = {
    let id = toString(i)
    let value = toString(i * 100)
    "{\"id\":" + id + ",\"name\":\"item" + id + "\",\"active\":true,\"value\":" + value + "}"
}
|
|
|
|
// Build the comma-joined JSON objects for indices i..n-1, appended to acc.
//
// Tail-recursive accumulator style: callers start with i = 0 and acc = "".
// The separator is empty only for the very first element (i == 0), so the
// result has no leading or trailing comma.
fn generateJsonArray(n: Int, i: Int, acc: String): String = {
    if i < n then {
        let separator = if i == 0 then "" else ","
        generateJsonArray(n, i + 1, acc + separator + generateJsonObject(i))
    }
    else acc
}
|
|
|
|
// Produce a JSON array string containing n generated objects.
fn generateLargeJson(n: Int): String = {
    let elements = generateJsonArray(n, 0, "")
    "[" + elements + "]"
}
|
|
|
|
// Simple JSON token counting (simulates parsing).
//
// Tail-recursive scan over `json` starting at index `i`, accumulating into
// `count` the number of structural tokens seen: '{', '}', '[', ']', ':', ','.
// String-literal contents are NOT skipped, so delimiters appearing inside
// quoted strings are counted too — fine for a benchmark that only needs a
// deterministic amount of work per pass.
//
// Parameters:
//   json  - the text to scan
//   i     - current character index (callers start at 0)
//   count - running token total (callers start at 0)
// Returns the final token count once i reaches the end of the string.
fn countJsonTokens(json: String, i: Int, count: Int): Int = {
    if i >= String.length(json) then count
    else {
        // NOTE(review): String.substring(json, i, i + 1) presumably yields the
        // one-character string at position i; a charAt-style accessor, if the
        // stdlib has one, would avoid a per-step substring — verify.
        let char = String.substring(json, i, i + 1)
        let newCount =
            if char == "{" then count + 1
            else if char == "}" then count + 1
            else if char == "[" then count + 1
            else if char == "]" then count + 1
            else if char == ":" then count + 1
            else if char == "," then count + 1
            else count
        countJsonTokens(json, i + 1, newCount)
    }
}
|
|
|
|
// Run the token-counting pass over `json` n times.
//
// Tail-recursive: each iteration re-scans the whole string from index 0 and
// folds its token count into `totalTokens` (callers start with 0). Returns
// the grand total across all iterations.
fn runBenchmark(json: String, n: Int, totalTokens: Int): Int = {
    if n > 0 then {
        let tokens = countJsonTokens(json, 0, 0)
        runBenchmark(json, n - 1, totalTokens + tokens)
    }
    else totalTokens
}
|
|
|
|
// Benchmark driver: generates ~100 JSON objects, runs 1000 token-counting
// passes over the resulting string, and reports size, total tokens, wall
// time, and throughput. Uses the Console and Time effects.
fn main(): Unit with {Console, Time} = {
    Console.print("JSON Parsing Benchmark")
    Console.print("======================")
    Console.print("")

    // Generate large JSON (~100 objects)
    Console.print("Generating JSON data...")
    let json = generateLargeJson(100)
    Console.print(" JSON size: " + toString(String.length(json)) + " bytes")
    Console.print("")

    // Benchmark parsing
    Console.print("Running benchmark (1000 iterations)...")
    let startTime = Time.now()
    let totalTokens = runBenchmark(json, 1000, 0)
    let endTime = Time.now()
    let elapsed = endTime - startTime
    // FIX: on a fast machine the 1000 iterations can finish in < 1 ms, making
    // elapsed 0 and the throughput division crash. Clamp the divisor to 1 ms
    // so the report degrades gracefully instead of dividing by zero.
    let elapsedSafe = if elapsed > 0 then elapsed else 1

    Console.print("")
    Console.print("Results:")
    Console.print(" Total tokens parsed: " + toString(totalTokens))
    Console.print(" Time: " + toString(elapsed) + " ms")
    // 1000 iterations / (elapsed ms / 1000 ms-per-s) == 1_000_000 / elapsed
    Console.print(" Iterations per second: " + toString((1000 * 1000) / elapsedSafe))
    Console.print("")
}
|
|
|
|
// Entry point: execute main under the effect runtime.
// NOTE(review): main declares {Console, Time} but the handler set supplied
// here is {} — presumably the runtime injects default handlers for built-in
// effects; confirm against the Lux runner semantics.
let result = run main() with {}