; Benchmark mIRC's raw file-handle commands on the file named in $1-.
; Opens a handle, seeks to the file's last line with /fseek -l, closes
; the handle, and echoes the elapsed time in milliseconds.
alias filetest {
; Cache the file's line count and the start tick before opening.
var %lines = $lines($1-), %ticks = $ticks
; "." prefix suppresses the command's confirmation output.
.fopen filetest $1-
; $ferr is set when the previous file command failed; only seek on success.
if (!$ferr) {
; -l seeks to the beginning of line %lines (the last line of the file).
/fseek -l filetest %lines
}
; NOTE(review): this close runs even when fopen failed above, which itself
; errors and falls through to the :error handler — confirm this is intended.
.fclose filetest
echo -a File handling commands time taken: $calc($ticks - %ticks) ms.
; Stop normal execution so it does not fall into the error handler below.
return
:error
; On a halted command, clean up so the next run can start fresh.
if (*command halted* iswm $error) {
; NOTE(review): no @test window is opened in this snippet — presumably
; created by related test code elsewhere; verify before removing.
close -@ @test
; Release the file handle if it is still open ($v1 holds the $fopen result).
if ($fopen(filetest)) .fclose $v1
}
}
; Benchmark a single /filter pass over the file named in $1-.
; The -ff switches read from a file and write to a file; writing to
; NUL discards the output so only the filtering work is timed.
alias filtertest {
  ; Record the start tick so the elapsed wall time can be reported.
  var %start = $ticks
  filter -ff $1- NUL
  echo -a Filter command time taken: $calc($ticks - %start) ms.
}
; Run both timing tests against the same log file.
alias benchtest {
  ; $my.ff.log resolves elsewhere; $qt quotes it in case the path has spaces.
  var %target = $qt($my.ff.log)
  filetest %target
  filtertest %target
}
File handling commands time taken: 2313 ms.
Filter command time taken: 2984 ms.
Hmmmm... you people don't know how to use /fseek!
I don't care if you choose Hixxy's way or mine. Both show that the operation itself is not the bottleneck. Using /fseek <handle> $file(<file>).size will take less than a millisecond to execute. You need to perform this operation every time you open a file to append to it. The bottleneck is the interpreter. And 170,000 lines in 5 seconds comes to 34,000 lines per second. "Servers.ini" uses maybe 1000. The default setting for the window buffer is 5000 lines. So why the obsession with /filter?