feat: add FBIP debug counters to prove optimization effectiveness

Add runtime counters tracking FBIP reuse vs copy operations:
- lux_fbip_reuse_count: incremented when rc=1 allows in-place mutation
- lux_fbip_copy_count: incremented when rc>1 forces allocation

Output now shows both memory stats and FBIP stats:
  [RC] No leaks: 13 allocs, 13 frees
  [FBIP] 3 reuses, 0 copies

Rename test_no_fbip.lux to test_ownership_transfer.lux to better
reflect that ownership transfer enables FBIP even with aliases.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-02-14 14:52:35 -05:00
parent 25a3adf4fc
commit bba94b534d
3 changed files with 60 additions and 20 deletions

View File

@@ -1,20 +0,0 @@
// Test WITHOUT FBIP by forcing rc>1 (shared reference)
// NOTE(review): this file's premise is that aliasing bumps the refcount and
// defeats FBIP. The commit message indicates the ownership-transfer
// optimization makes that assumption false (the alias keeps rc=1), which is
// why this file was removed and replaced by test_ownership_transfer.lux.
// Helper that consumes the list argument by reference; returns its length.
fn useList(l: List<Int>): Int = {
List.length(l)
}
fn main(): Unit = {
Console.print("=== Without FBIP (forced rc>1) ===")
// Build a 100-element list with rc=1.
let a = List.range(1, 100)
// Create alias to bump rc, preventing FBIP
// (presumably: with ownership transfer this no longer bumps rc — see note above)
let alias = a
// len1 is intentionally unused; the call exists only to make 'alias' live.
let len1 = useList(alias)
// Now 'a' has rc>1 so map must allocate new list
// Chain map -> filter -> reverse; each step should take the copy path if rc>1.
let b = List.map(a, fn(x: Int): Int => x * 2)
let c = List.filter(b, fn(x: Int): Bool => x > 50)
let d = List.reverse(c)
Console.print("Shared reference chain done")
}

View File

@@ -0,0 +1,21 @@
// Test demonstrating ownership transfer with aliases
// The ownership transfer optimization ensures FBIP still works
// even when variables are aliased, because ownership is transferred
// rather than reference count being incremented.
// Expected debug output (per commit message): [FBIP] 3 reuses, 0 copies —
// one reuse each for map, filter, and reverse below.
fn main(): Unit = {
Console.print("=== Ownership Transfer Test ===")
// Build a 100-element list; freshly allocated, so rc=1.
let a = List.range(1, 100)
// Ownership transfers from 'a' to 'alias', keeping rc=1
let alias = a
// len1 is intentionally unused; the call only exercises the alias.
let len1 = List.length(alias)
// Since ownership transferred, 'a' still has rc=1
// FBIP can still optimize map/filter/reverse
// Each step below should mutate in place (reuse) rather than allocate.
let b = List.map(a, fn(x: Int): Int => x * 2)
let c = List.filter(b, fn(x: Int): Bool => x > 50)
let d = List.reverse(c)
Console.print("Ownership transfer chain done")
}

View File

@@ -452,6 +452,10 @@ impl CBackend {
self.writeln("static int64_t lux_rc_alloc_count = 0;");
self.writeln("static int64_t lux_rc_free_count = 0;");
self.writeln("");
self.writeln("// FBIP (Functional But In-Place) optimization counters");
self.writeln("static int64_t lux_fbip_reuse_count = 0;");
self.writeln("static int64_t lux_fbip_copy_count = 0;");
self.writeln("");
}
self.writeln("// Allocate RC-managed memory with initial refcount of 1");
@@ -508,6 +512,11 @@ impl CBackend {
self.writeln(" fprintf(stderr, \"[RC] No leaks: %lld allocs, %lld frees\\n\",");
self.writeln(" (long long)lux_rc_alloc_count, (long long)lux_rc_free_count);");
self.writeln(" }");
self.writeln(" // Print FBIP optimization stats");
self.writeln(" if (lux_fbip_reuse_count > 0 || lux_fbip_copy_count > 0) {");
self.writeln(" fprintf(stderr, \"[FBIP] %lld reuses, %lld copies\\n\",");
self.writeln(" (long long)lux_fbip_reuse_count, (long long)lux_fbip_copy_count);");
self.writeln(" }");
self.writeln("}");
self.writeln("");
}
@@ -1041,6 +1050,9 @@ impl CBackend {
self.writeln("static LuxList* lux_list_reverse(LuxList* list) {");
self.writeln(" // FBIP: If rc=1, reverse in-place instead of copying");
self.writeln(" if (LUX_RC_HEADER(list)->rc == 1) {");
if self.debug_rc {
self.writeln(" lux_fbip_reuse_count++;");
}
self.writeln(" // In-place reversal - just swap element pointers");
self.writeln(" int64_t n = list->length;");
self.writeln(" for (int64_t i = 0; i < n / 2; i++) {");
@@ -1050,6 +1062,9 @@ impl CBackend {
self.writeln(" }");
self.writeln(" return list; // Reuse same list");
self.writeln(" }");
if self.debug_rc {
self.writeln(" lux_fbip_copy_count++;");
}
self.writeln(" // rc > 1: Allocate new list (standard path)");
self.writeln(" LuxList* result = lux_list_new(list->length);");
self.writeln(" for (int64_t i = 0; i < list->length; i++) {");
@@ -1068,6 +1083,9 @@ impl CBackend {
self.writeln(" }");
self.writeln(" // FBIP: If rc=1, truncate in-place");
self.writeln(" if (LUX_RC_HEADER(list)->rc == 1) {");
if self.debug_rc {
self.writeln(" lux_fbip_reuse_count++;");
}
self.writeln(" // Decref elements we're dropping");
self.writeln(" for (int64_t i = n; i < list->length; i++) {");
self.writeln(" lux_decref(list->elements[i]);");
@@ -1075,6 +1093,9 @@ impl CBackend {
self.writeln(" list->length = n;");
self.writeln(" return list; // Reuse same list");
self.writeln(" }");
if self.debug_rc {
self.writeln(" lux_fbip_copy_count++;");
}
self.writeln(" // rc > 1: Allocate new list");
self.writeln(" LuxList* result = lux_list_new(n);");
self.writeln(" for (int64_t i = 0; i < n; i++) {");
@@ -1094,6 +1115,9 @@ impl CBackend {
self.writeln(" int64_t new_len = list->length - n;");
self.writeln(" // FBIP: If rc=1, shift elements in-place");
self.writeln(" if (LUX_RC_HEADER(list)->rc == 1) {");
if self.debug_rc {
self.writeln(" lux_fbip_reuse_count++;");
}
self.writeln(" // Decref elements we're dropping");
self.writeln(" for (int64_t i = 0; i < n; i++) {");
self.writeln(" lux_decref(list->elements[i]);");
@@ -1105,6 +1129,9 @@ impl CBackend {
self.writeln(" list->length = new_len;");
self.writeln(" return list; // Reuse same list");
self.writeln(" }");
if self.debug_rc {
self.writeln(" lux_fbip_copy_count++;");
}
self.writeln(" // rc > 1: Allocate new list");
self.writeln(" LuxList* result = lux_list_new(new_len);");
self.writeln(" for (int64_t i = 0; i < new_len; i++) {");
@@ -2475,6 +2502,9 @@ impl CBackend {
self.writeln(&format!("LuxList* {};", result_var));
self.writeln(&format!("if (LUX_RC_HEADER({})->rc == 1) {{", list));
self.indent += 1;
if self.debug_rc {
self.writeln("lux_fbip_reuse_count++;");
}
self.writeln(&format!("// FBIP: Reuse list in-place"));
self.writeln(&format!("{} = {};", result_var, list));
self.writeln(&format!("for (int64_t {} = 0; {} < {}->length; {}++) {{", i_var, i_var, list, i_var));
@@ -2489,6 +2519,9 @@ impl CBackend {
self.indent -= 1;
self.writeln("} else {");
self.indent += 1;
if self.debug_rc {
self.writeln("lux_fbip_copy_count++;");
}
self.writeln(&format!("// Allocate new list"));
self.writeln(&format!("{} = lux_list_new({}->length);", result_var, list));
self.writeln(&format!("for (int64_t {} = 0; {} < {}->length; {}++) {{", i_var, i_var, list, i_var));
@@ -2528,6 +2561,9 @@ impl CBackend {
self.writeln(&format!("int64_t {} = 0;", count_var));
self.writeln(&format!("if (LUX_RC_HEADER({})->rc == 1) {{", list));
self.indent += 1;
if self.debug_rc {
self.writeln("lux_fbip_reuse_count++;");
}
self.writeln(&format!("// FBIP: Filter in-place"));
self.writeln(&format!("{} = {};", result_var, list));
self.writeln(&format!("for (int64_t {} = 0; {} < {}->length; {}++) {{", i_var, i_var, list, i_var));
@@ -2550,6 +2586,9 @@ impl CBackend {
self.indent -= 1;
self.writeln("} else {");
self.indent += 1;
if self.debug_rc {
self.writeln("lux_fbip_copy_count++;");
}
self.writeln(&format!("// Allocate new list"));
self.writeln(&format!("{} = lux_list_new({}->length);", result_var, list));
self.writeln(&format!("for (int64_t {} = 0; {} < {}->length; {}++) {{", i_var, i_var, list, i_var));