In Julia it's easy to see the difference between promoting to the full numeric tower and sticking to 64-bit floats (Float64) — note the difference in result, 0.0 vs 1.0:
julia> using BenchmarkTools
julia> @btime floor(0.49999999999999997+0.5)
0.027 ns (0 allocations: 0 bytes)
1.0
julia> @btime floor(0.49999999999999997+BigFloat(0.5))
280.594 ns (6 allocations: 336 bytes)
0.0
julia> @code_native floor(0.49999999999999997+BigFloat(0.5))
.text
; ┌ @ floatfuncs.jl:152 within `floor'
subq $24, %rsp
movq %rsi, 16(%rsp)
movq (%rsi), %rax
; │┌ @ floatfuncs.jl:152 within `#floor#543'
movq %rax, (%rsp)
movabsq $jl_system_image_data, %rax
movq %rax, 8(%rsp)
movabsq $japi1_round_16005, %rax
movabsq $jl_system_image_data, %rdi
movq %rsp, %rsi
movl $2, %edx
callq *%rax
; │└
addq $24, %rsp
retq
nopw %cs:(%rax,%rax)
; └
julia> @code_native floor(0.49999999999999997+0.5)
.text
; ┌ @ floatfuncs.jl:152 within `floor'
; │┌ @ floatfuncs.jl:152 within `#floor#543'
; ││┌ @ floatfuncs.jl:152 within `round'
vroundsd $9, %xmm0, %xmm0, %xmm0
; │└└
retq
nopw (%rax,%rax)
; └