using MLStyle
using BenchmarkTools
# Iterative sum: a plain accumulation loop over the elements
function my_sum(v::AbstractVector{T}) where {T <: Number}
    s = zero(T)  # accumulate in the element type to keep the loop type-stable
    for i in v
        s += i
    end
    s
end
# Recursive sum: MLStyle pattern matching destructures the array on each call
function rec_sum(v::AbstractVector{T}) where {T <: Number}
    @match v begin
        [x, y...] => x + rec_sum(y)
        [] => zero(T)
        _ => error("Never")
    end
end
# Recursive sum without pattern matching: an index-based inner helper
function rec_sum_indexed(v::AbstractVector{T}) where {T <: Number}
    helper(i) = begin
        i > length(v) ? zero(T) : v[i] + helper(i + 1)
    end
    helper(1)
end
v = 1:100
@btime sum(v)
@btime my_sum(v)
@btime rec_sum(v)
@btime rec_sum_indexed(v)
10.208 ns (1 allocation: 16 bytes)      # sum(v)
12.930 ns (1 allocation: 16 bytes)      # my_sum(v)
8.625 μs (195 allocations: 169.19 KiB)  # rec_sum(v)
2.727 μs (97 allocations: 1.53 KiB)     # rec_sum_indexed(v)
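The single 16-byte allocation reported for sum(v) and my_sum(v) most likely comes from benchmarking through the non-const global v rather than from the functions themselves. BenchmarkTools recommends interpolating arguments with $ so the call is measured against a concretely typed value; a minimal sketch of the same runs with interpolation (the timings would differ slightly from those above):

@btime sum($v)
@btime my_sum($v)
@btime rec_sum($v)
@btime rec_sum_indexed($v)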
Performance Analysis (AI Generated)
Built-in sum(): Fastest. For a UnitRange such as 1:100, Base dispatches to a specialized method that computes the result in closed form (arithmetic series), so there is no per-element work; on a dense Vector it would additionally benefit from SIMD.
Iterative my_sum(): Nearly as fast as the built-in: a plain accumulation loop with minimal overhead and no heap allocation in the loop body.
Recursive rec_sum(): Slowest. Each call destructures the array through the pattern match, which allocates a fresh tail on every step (195 allocations, 169.19 KiB) and builds a deep call stack; a view-based variant is sketched below.
Indexed recursive rec_sum_indexed(): Faster than the pattern-matching version but still slow due to the overhead of one function call per element (97 small allocations totaling 1.53 KiB), though it avoids copying the tail of the array.
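To illustrate the allocation point for rec_sum, here is a minimal sketch of a recursive sum that walks a view of the tail instead of destructuring it; the name rec_sum_view and this formulation are illustrative and were not part of the benchmark above:

# Recursive sum over a view of the tail: no per-call copy of the remaining elements
function rec_sum_view(v::AbstractVector{T}) where {T <: Number}
    isempty(v) && return zero(T)             # base case: zero of the element type
    first(v) + rec_sum_view(@view v[2:end])  # recurse on a non-copying view
end

Each call still pays function-call overhead, and Julia does not guarantee tail-call elimination, so the iterative my_sum remains the idiomatic choice.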