
pattern matcher

Will, 4 months ago
parent commit 33137706c2
63 files changed, with 3896 additions and 31 deletions
  1. SS.k (+12 -1)
  2. excelpm.k (+43 -0)
  3. exec.sh (+18 -0)
  4. ngnk-libs/README.org (+7 -0)
  5. ngnk-libs/audio/abc.k (+33 -0)
  6. ngnk-libs/audio/example.k (+75 -0)
  7. ngnk-libs/audio/riff.k (+8 -0)
  8. ngnk-libs/audio/wav.k (+35 -0)
  9. ngnk-libs/bigint/bigint.k (+60 -0)
  10. ngnk-libs/bigint/examples.k (+45 -0)
  11. ngnk-libs/classic-cs/README.org (+10 -0)
  12. ngnk-libs/classic-cs/heap.k (+44 -0)
  13. ngnk-libs/classic-cs/sparse-example.k (+31 -0)
  14. ngnk-libs/classic-cs/sparse.k (+15 -0)
  15. ngnk-libs/classic-cs/state-machines/example.k (+60 -0)
  16. ngnk-libs/classic-cs/state-machines/parse.k (+11 -0)
  17. ngnk-libs/compression/README.org (+12 -0)
  18. ngnk-libs/compression/lz4.k (+47 -0)
  19. ngnk-libs/compression/lz4example.k (+13 -0)
  20. ngnk-libs/csv/README.org (+17 -0)
  21. ngnk-libs/csv/csv.k (+10 -0)
  22. ngnk-libs/ffi/README.org (+43 -0)
  23. ngnk-libs/ffi/ext.diff (+196 -0)
  24. ngnk-libs/ffi/make.diff (+23 -0)
  25. ngnk-libs/ffi/makefile (+21 -0)
  26. ngnk-libs/image/error-diffusion-example.k (+11 -0)
  27. ngnk-libs/image/error-diffusion.k (+59 -0)
  28. ngnk-libs/image/ordered-dither-example.k (+8 -0)
  29. ngnk-libs/image/ordered-dither.k (+10 -0)
  30. ngnk-libs/ipc/json-rpc-example.k (+25 -0)
  31. ngnk-libs/ipc/json-rpc.k (+40 -0)
  32. ngnk-libs/json/README.org (+41 -0)
  33. ngnk-libs/json/json.k (+45 -0)
  34. ngnk-libs/parsing/earley-example.k (+31 -0)
  35. ngnk-libs/parsing/earley.k (+51 -0)
  36. ngnk-libs/parsing/earley.org (+330 -0)
  37. ngnk-libs/png/README.org (+21 -0)
  38. ngnk-libs/png/example.k (+50 -0)
  39. ngnk-libs/png/png.k (+58 -0)
  40. ngnk-libs/png/png.py (+48 -0)
  41. ngnk-libs/regex/example.k (+26 -0)
  42. ngnk-libs/regex/re.k (+80 -0)
  43. ngnk-libs/trees/README.org (+417 -0)
  44. ngnk-libs/trees/bst-example.k (+33 -0)
  45. ngnk-libs/trees/bst.k (+50 -0)
  46. ngnk-libs/trees/examples.k (+57 -0)
  47. ngnk-libs/trees/hsuthesis.k (+263 -0)
  48. ngnk-libs/trees/lambda-calc-example.k (+194 -0)
  49. ngnk-libs/trees/lambda-calc-walkthrough.txt (+186 -0)
  50. ngnk-libs/trees/lambda-calculus.k (+109 -0)
  51. ngnk-libs/trees/quadtree-example.k (+20 -0)
  52. ngnk-libs/trees/quadtree.k (+37 -0)
  53. ngnk-libs/trees/quadtrees.org (+15 -0)
  54. ngnk-libs/trees/trees.k (+85 -0)
  55. ngnk-libs/tutorial/README.org (+31 -0)
  56. ngnk-libs/tutorial/ktour.txt (+390 -0)
  57. ngnk-libs/tutorial/tutorial.k (+23 -0)
  58. ngnk-libs/xml/README.org (+56 -0)
  59. ngnk-libs/xml/edgecase.xml (+9 -0)
  60. ngnk-libs/xml/walkthrough.txt (+57 -0)
  61. ngnk-libs/xml/xml.k (+40 -0)
  62. test.k (+0 -30)
  63. test_no_newlinesk (+1 -0)

+ 12 - 1
SS.k

@@ -1,3 +1,9 @@
+// import as re.*
+\d re
+\l ngnk-libs/regex/re.k
+
+\d .
+
 /helpers
 / e exclusive
 slice: {[l;s;e] s_(e-1)#l};
@@ -6,7 +12,7 @@ split: {[l;di] idx: (-1,&(di~'l)),(#l); {slice[x;y[0]+1;y[1]+1]}[l]'(idx@(2':!#i
 s2n: {[s] `i$s;}; // string to number
 parseidx: {core: slice[x;1;#x]; split[core;";"]};
 
- :parseidx["[1;2]"];
+/:parseidx["[1;2]"];
 /:split["1;2;3";";"];
 
 tstkey: "States/Events"; / key indicating start of transition section
@@ -34,3 +40,8 @@ dd: {[r] $[r[1;0]~"[";rd[r];r[0]]}; // detect dep
 / initialize data
 / f:<`"compiled_ss.k";
 / f 0: kcode;
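+/ quick check of the regex library: compare re.scmp (printed as "search") with re.cmp (printed as "match") on a few test strings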
+para:`0:
+testcases:{para"search ",z;`0:$x z;para"match ",z;`0:$y z}
+test:{para"---";para"regex: ", x;testcases[re.match[re.scmp@x];re.match[re.cmp@x]]'y;};
+cases:("abc";"xxxabbc";"abbbcyyy";"xxxabbbbcyyy");
+test["ab+c";cases]

+ 43 - 0
excelpm.k

@@ -0,0 +1,43 @@
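+/ Sketch of a character-class state machine, apparently for Excel-style cell
+/ references (e.g. A1 or A1:B22); that reading is an assumption.  run appends
+/ "$" to the input as an end-of-string marker.
+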
+/ :`i$"A"; // 65
+/ :`i$"Z"; // 90
+/ :`i$"0"; // 48
+/ :`i$"9"; // 57
+/ :`i$":"; // 58
+/ :`i$"$"; // 36
+
+testAtZ: { i:`i$x; $[(i>64) & (i<91);1;0] };
+test0t9: { i:`i$x; $[(i>47) & (i<58);1;0] };
+testcol: { i:`i$x; $[i=58;1;0] };
+testend: { i:`i$x; $[i=36;1;0] };
+
+s0: ((testAtZ; 1);(test0t9; 2));
+s1: ((test0t9; 2);(testcol; 3));
+s2: ((testend; 7); (testcol;5));
+s3: (,(testAtZ; 5));
+s4: ((testAtZ; 5); (test0t9; 5));
+s5: (,(testend; 6));
+s7: (({`0:"SUCCESS"};6));
+// 7 is done
+states:: (s0;s1;s2;s3;s4;s5;s7);
+i: 1;
+
+run: {[str]
+ str,:"$";
+
+ // go over each char and run cur state
+ // cur state; start of pattern; length of pattern    <- args
+ idx::0;
+ o:(0;-1;-1) {[a;c] // TODO, add support for multiple active states
+  s: a[0];
+  r:{
+   :$[x[0][y];x[1];0]
+  }[;c]'states[s];
+
+  r: *($[(+/r)>0;r^0;0]);
+
+  idx +: 1;
+  :(r;$[(s=0)&(r>0);idx-1;a[1]];$[r=5;idx;a[2]])
+ }\str;
+
+ :o
+};

+ 18 - 0
exec.sh

@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# Check if the file path is provided
+if [ -z "$1" ]; then
+  echo "Usage: $0 <file path>"
+  exit 1
+fi
+
+# File path from the first argument
+input_file="$1"
+# Define the output file name
+output_file="${input_file%.*}_no_newlines${input_file##*.}"
+
+# Remove newline characters and save to a new file
+tr -d '\n' < "$input_file" > "$output_file"
+echo "\n" >> "$output_file"
+
+echo "Newlines removed. Output file is: $output_file"

+ 7 - 0
ngnk-libs/README.org

@@ -0,0 +1,7 @@
+* ngnk-libs
+  A collection of projects written in K.
+
+Ideally some of these would serve as a basis for collaboration to create some generally useful tools
+for [[https://ngn.codeberg.page/][ngn/k]].  As there's not a lot of this around, there aren't any standards for how this
+should/could be organized.  I've decided to put each script in a separate directory.  Eventually
+I'll include some tests and hopefully some commentary as to how each might be used.

+ 33 - 0
ngnk-libs/audio/abc.k

@@ -0,0 +1,33 @@
+/  ABC music notation [[https://abcnotation.com/wiki/abc:standard:v2.1]]
+
+/t:"C,^a2/3(3__d>G'=Z4c''A128e//f/3"
+is:{&(x="z")|~0 8'-"a"-x:_x}                                               / index of base note
+ns:{-1_*/1(,[;0]1_<':)\0{(1&y)*y+10*x}\*/1(~0 10')\-"0"-x,"0"}             / calculated numbers
+tk:{-/|(::;n@)@'(.w)*'/:1~:\~n@w:w@=x'w:@/1<:\(&1&n:ns z),(&|/y=\:z)}
+ds:{w:is[x]; $[~#d:tk[w;"(/<>";x];:1.0+&#w;]
+    r:+{$[1=c:+/&\"/"=x@-y;(1%+/(y[1];2)*1~:\0<y 1;1+0<y 1)
+       (0=c)&0>*y;(1.0;0)
+       c>1;(1%c(2*)/1;c)
+       ("/"~x@-y 1)&/0<y[0 2];(%/y[0 2];3)
+       0<*y;(*y;1)
+       (1.0;0)]}[x]'d
+     b:{$[^"<>"?*b:(&\(=':).|1*:\)#x@-y;0N;(1(2-)\1%(#b)(2*)/1)1~:\">"=*b]}[x]'r[1]_'.d
+     {$[0N~z;x;@[x;y+!2;:;x[y]*z]]}/[@[1.0+&#w;!r;:;.r:r 0];!b;.b]}
+os:{(=/1_:\x@w)+@[&#x;&|~c;:;|-':(+\c)@&~c:|-/"',"=\:x]@w:is[x]}           / register of base note
+as:{(-/1(~^:)\"_=^"?*a;2*=/a:x@is[x]-/:1 2)}                               / accidentals
+
+td:2 3 4 5 6 7 8 9!3 2 3 2 2 2 3 2
+tpl:{d:{(0N;td[r 0];r 0)^'r:@[;!3]@*'1_'_/|1(0,&":"=x@-:)\1,y}[x]'tk[w:&"("=x;,":";x]
+     $[~#w;:1.0+&#is[x];]
+     a:+,/'.((,'=(w@!d)'i:is[x])_-1;(%/|:)''0 2_/:d)
+     {[d;i;r;c]@[d;(c&#i)#i;*;r]}/[1.0+&#i;;;].a}
+/ds t
+key:0 2 3 5 7 8 10
+scale:0 2 2 1 2 2 2
+range:"cdefgab"
+sig:{r:-/(y+key[b]+\scale;12/(::;key@)@'g:7 7\(!7)+b:range?x);r@<*|g}  / adjustments to notes by key signature
+
+mel:{s:sig.(x);(n;m):1(~^:)\key@b:range?_y@is[y];a:*as[y]              / calculate melody from notes,
+     n+m*(12*os[y])+0^{x,z^y+y*y*z}/[();*as[y];s@b]}                   /   accidentals and signature
+
+/ Plenty of room for nonsensical input.

+ 75 - 0
ngnk-libs/audio/example.k

@@ -0,0 +1,75 @@
+\l wav.k
+
+/ Think DSP           [[https://greenteapress.com/wp/think-dsp/]]
+/ Mathematics of DFT  [[https://ccrma.stanford.edu/~jos/mdft/]]
+/ See also            [[http://arlyle.sdf.org/k/sound.k]]
+
+para:`0:
+/ sine wave: 25 samples for one cycle, 440 cycles for 11000 samples per second
+FR:11025          / framerate
+HZ:27.5           / frequency
+AMP:0.5           / amplitude
+DR:1.0            / duration
+WIDTH:2           / sample width in bytes
+rng:2/~!8*WIDTH   / quantization range
+
+nt:note[12]      / twelve tone system
+hz:8*HZ          / Base note
+
+mel:{s:+/12 -12*(,'"+-")~/:\:t:(~#:)_" "\x;,/(((~^n)*+\s)+n:0 2 3 5 7 8 10"abcdefgr"?t)@&~s}
+chrd:{+/1(+\12*<':)\+/(+\0 2 1 2 2 1 2;-1 0 1)@'("abcdefg";"b #")?'+(" "\x)@/:\:0 1}
+dur:{*/(1%4(2*)\1;1.5 1)@'("whqes";". ")?'+((~#:')_" "\x)@/:\:0 1}
+
+mkpcm:{z 1:mkchnk["RIFF"]@,/("WAVE";mkfmt pcm.(x);mkchnk["data";y])}
+ser:{,/+|`c$(x#256)\y}
+
+ys:+/sin[;AMP;0;ph[FR;DR]]'hz*nt[chrd "c eb a"]
+
+para"Play a chord: ",wav:"test1.wav"
+mkpcm[(1;FR;WIDTH);ser[WIDTH]qnt[rng]apo[0.015]AMP*nrm[1-1e-15]ys;wav]
+para""
+
+hz*:2
+tune:("g g g d e c  r r + c"
+      "e q q e q e. s q   e.")
+
+tone:{$[^y;&#z;apo[0.05;sin[hz*nt y+2;AMP;0]z]],&_(1-x)*(1%x)*#z}
+
+ys:,/tone[0.65].'+(mel@;ph[FR]'0.65*dur@)@'tune
+
+para"Play a tune: ",wav:"test2.wav"
+mkpcm[(1;FR;WIDTH);ser[WIDTH]qnt[rng]apo[0.015]AMP*nrm[1-1e-15]ys;wav]
+para""
+
+ys:sin[hz*nt[2];0.5*AMP;0;ph[FR;DR]]
+/ys+:cos[2*hz*nt[4];1.5*AMP;0;ph[FR;DR]]
+
+para"Play a note: ",wav:"test3.wav"
+mkpcm[(1;FR;WIDTH);ser[WIDTH]qnt[rng]apo[0.015]AMP*nrm[1-1e-15]ys;wav]
+para""
+
+/ WARNING:  Be cautious with the volume while testing!!
+
+para"Parse a wav file: -> (depth; tag; data)"
+para"RIFF format is a tree structure.  Depth is the depth vector for that tree."
+2#(dp;tg;dt):p:prs@1:wav
+para""
+
+para"Map tags to data"
+mp:!/1_p
+para"fmt:"
+mp"fmt "
+para"parse fmt:"
+prsfmt mp"fmt "
+para"data length"
+#mp"data"
+
+
+\d abc
+\l abc.k
+\d .
+
+mktun:{ ys:,/tone[s].'+(abc.mel[y]@;ph[FR]'(s:0.65)*0.25*/(abc.ds;abc.tpl)@\:)@\:z
+ mkpcm[(1;FR;WIDTH);ser[WIDTH]qnt[rng]apo[0.015]AMP*nrm[1-1e-15]ys;x]}
+
+mktun["test4.wav";("c";-1)]"G/GGD/EC/>>z/zc/>z/"

+ 8 - 0
ngnk-libs/audio/riff.k

@@ -0,0 +1,8 @@
+/ [[https://en.wikipedia.org/wiki/Resource_Interchange_File_Format]]
+/ samples: [[https://www.mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Samples.html]]
+
+chnk:{(i;l;d):0 4 8_*x;(d;r):(0;+/1(2!)\l:256/|256!l)_d;(r;x[1],,(i;l#d))}
+chnks:{*|(#*:)chnk/(x;())}
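+/ prs: parse RIFF/LIST chunks into (depth;tag;data) vectors (see example.k in this directory)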
+prs:{+*{{$[|/("RIFF";"LIST")~\:z[0];(,(y;z[0];4#z[1])),,/x[y+1;4_z[1]];,(y;z[0];z[1])]}[o;x]'chnks y}[0]x}
+
+mkchnk:{,/(x;`c$|(4#256)\#y;y,(2!#y)#0x00)}

+ 35 - 0
ngnk-libs/audio/wav.k

@@ -0,0 +1,35 @@
+/ [[https://www.mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html]]
+\l riff.k
+
+/ pi continued fraction A001203
+pcf:.'", "\"3, 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14, 2, 1, 1, 2, 2, 2, 2, 1, 84, 2, 1, 1, 15, 3, 13, 1, 4, 2, 6, 6, 99, 1, 2, 2, 6, 3, 5, 1, 1, 6, 8, 1, 7, 1, 2, 3, 7, 1, 2, 1, 1, 12, 1, 1, 1, 3, 1, 1, 8, 1, 1, 2, 1, 6, 1, 1, 5, 2, 2, 3, 1, 2, 4, 4, 16, 1, 161, 45, 1, 22, 1, 2, 2, 1, 4, 1, 2, 24, 1, 2, 1, 3, 1, 2, 1"
+pi:{y+1%x}/|pcf
+pi2:2*pi
+
+fspec:`t`nc`ss`abs`ba`bs`es`vb`cm`sf!2 2 4 4 2 2 2 2 4 16
+fmt:{((#r)#!fspec)!r:256/'256!|'-1_(<[#x;]_+\0,.fspec)_x}
+prsfmt:{((#c)#!fspec)!256/'256!|'(c:0,((#x)>)#+\.fspec)_x}
+
+pcm:{[c;r;l]`t`nc`ss`abs`ba`bs!(1;c;r;r*l*c;l*c;8*l)}
+mkfmt:{mkchnk["fmt ";,/(256+&'fspec@!v){|`c$x\y}'v:(^:)_k!x@k:!fspec]}
+
+alaw:{s:1 -1@0>y;s*$[1>r:s*y*x;r;1+`ln r]%1+`ln x}o
+alawinv:{s:1 -1@0>y;a:1+`ln x;s*$[1>r:s*y*a;r;`exp@-1+r]%x}
+
+apo:{@[y;+i;*;%[;s]@*i:1((#y)-1+)\!s:_x*#y]}
+
+sin:{[f;a;s;t]a*`sin s+pi2*f*t}  / [HZ;AMP;off;ts]
+cos:{[f;a;s;t]a*`cos s+pi2*f*t}  / [HZ;AMP;off;ts]
+
+qnt:{_x*y+~x>128}                / [rng;ys] quantization
+ph:{(!_x*y)%x}                   / [FR;dur] phases
+nrm:{x*y%|/{x|-x}(|/;&/)@\:y}
+ser:{,/+|`c$(x#256)\y}           / [WIDTH;qnt data] serialize
+
+
+/ [[https://en.wikipedia.org/wiki/Equal_temperament]]
+note:{`exp y*(`ln 2)%x}
+
+/
+  sw:,/440#,_128*`sin@(2*pi*!50)%50
+  mkchnk["RIFF"]@,/("WAVE";mkfmt pcm[1;22000;1];mkchnk["data";`c$sw])

+ 60 - 0
ngnk-libs/bigint/bigint.k

@@ -0,0 +1,60 @@
+red:{f:0 127@x;,/|1(#/|f,x=64>*:)\(&\f=)_y}
+
+leb80:{`c$|+/1(128*&1,-1+#:)\x}
+
+/ unsigned leb8
+leb8:{leb80[128\x]}
+unleb8:|+/1(128*0>)\0+
+
+/ signed leb8
+leb8s:{s:x<0;leb80 red[s]((::;neg)s)[128\(1 -1s)*x]}
+
+/ unsigned padding
+pad:{0^@\:/1(!|/#:')\|:'(!0),/:(x;y)}
+
+/ signed padding
+pads:{(127 0@64>*p[~i])^(|'p)@\:!s@i:*>s:#'p:(!0),/:(x;y)}
+
+neg:{$[64>*x;add[1;127-x];127-add[-1;x]]}
+
+add0:{,/(::;|x\)@'{(r;c):y;(c;d):(0;x)\+/c,z;(r,d;c)}[x]/[(!0;0);+y]}
+
+/ unsigned addition with given base
+addb:{(&\~:)_|add0[x]pad[y;z]}
+
+/ unsigned addition
+/add:{(&\~:)_|add0[128]pad[x;y]}
+add:addb[128]
+
+/ signed addition
+adds:{r:|add0[128]pads[x;y]
+  $[=/s:~64>*'(x;y)
+    (((*s)=64>*r)#(0 127@*s)),r:(*s)_r
+    _/|1(0 0~@[;!2]@)\(64>*r)_r]}
+
+/ unsigned multiplication
+mul:{{add/(x,0;y)}/@[&1+|/i;i:+/!#'x;add;,/*\:/x:(x;y)]}
+
+/ unsigned subtraction using signed addition
+sub:{(x;y):,'/|1(#\:/|0,,~64>*:')\(x;y);(&\~:)_adds[x;neg y]}
+
+dm0:{2#(#*|:){(i;p;d):y
+  $[(~#p)&(~c:-/#'h)&j:*<h:(x[i];*d)
+     (i;p;(*d;!0),'(0,1)_*|d)
+    (0>c)|(~c)&~j
+     (i+1;x[i];d)
+    (i;(sub[*d;p]),*|d;())]}[y]/(0;!0;(0,#*y)_z)}
+
+divmod:{{$[*<+|1#:'\(*x;*|y);y;(*y;!0),'dm0[128;x;*|y]]}[x]/(!0;y)}
+
+// Faster, but does most math in host K and so has limits.
+
+\d fast
+pad:{0^x@\:!|/#:'x:|'(!0),/:(x;y)}
+str:,/|1(&~#:)\(&\~:)_
+car:{str{(x\z+*y),1_y}[0,x]/[,0;y]}
+
+add:car[128]@+/pad@
+mul0:{@[&1+m;(m:|/i)-i:+/!#'x;+;,/*\:/x:(x;y)]}
+mul:{car[128]mul0[x;y]}
+\d .

+ 45 - 0
ngnk-libs/bigint/examples.k

@@ -0,0 +1,45 @@
+\l bigint.k
+
+/ -- (not (n base 2)) ~ ((127 - n) base 2) for all base 127 digits n --
+`0:"-- use subtraction to not bits --"
+~/(~2\!128;2\127-!128)
+
+/ -- bigints use lists of digits --
+/ Signed 7-bit numbers use the most significant bit for the signed bit
+/ negation uses twos complement
+`0:"-- twos complement --"
++2\*'(,59; \neg[,59])
+
+/ -- since we're base 128, numbers which use the 7th bit need a leading zero --
+/ to provide a signed bit.
+`0:"-- extra digit for signed bit --"
+,/'+'2\'(0 69; \neg 0 69)
+
+/ -- double negation is the identity --
+`0:"-- negation is own inverse --"
+neg neg[,69]
+neg neg[0 69]
+
+/ -- for unsigned base 128 bigints, extra digit is not needed --
+`0:"-- unsigned addition --"
+add[,69;72]
+
+/ -- signed addition follows the convention above --
+`0:"-- signed addition --"
+adds[,69;,72]
+
+/ -- Handling the signed bit is tricky --
+/ test cases
+#T:,/,/:\:/2#,,',/1 neg'\T0:,/1(+|1(0 127@64>)\)\12 63 121
+/ convert to integers
+c:{(-1 1s)*128/((neg;::)@s:64>*x)x}
+/ test that digit-wise addition matches addition
+/ covers (all?) the odd sign bit cases
+`0:"-- addtion test --"
+~/(+/'c''T;c'adds.'T)
+
+/ divisor times all positive digits base 128
+`0:"-- Render base 10 --"
+ms:128\'10*1_!128
+
+($n)~ \,/$|*|(#*:){(!0;*|x),' \divmod[ms;*x]}/( \128\n:72932651;!0)

+ 10 - 0
ngnk-libs/classic-cs/README.org

@@ -0,0 +1,10 @@
+* A Collection of classic CS algorithms
+  This is just a collection of classic CS algorithms implemented in [[https://ngn.codeberg.page/][ngn/k]].  Some may be useful in
+  actual programming contexts but the primary motivation was a mix of practice and exposition.
+
+  ngn/k is after all interpreted and therefore can't compete with algorithms hand-crafted in lower
+  level languages.  On the other hand, you don't always have the luxury to mix languages just to
+  use a particular algorithm in your project.
+
+* Index
+  Not today, but hopefully some day.

+ 44 - 0
ngnk-libs/classic-cs/heap.k

@@ -0,0 +1,44 @@
+/ binary heap
+\d hpbn
+/ shift down
+sd:{$[(y<z)&0<c:*>x@i:(z>)#y,-1 0+2*y+1;o[@[x;y,i[c];:;x@i[c],y];i[c];z];x]}
+
+/ shift up
+su:{$[y&>/v:x@y,s:-2!y-1;o[@[x;s,y;:;v];s];x]}
+
+/heapify
+hp:{sd/[x;|:!c:1+-2!-2+#x;#x]}
+
+/ heap insert
+hi:{su[x,y;#x]}
+
+/ max extract
+hx:{(*x;sd[(*|x),-1_1_x;0;#x])}
+
+/ heap sort
+hs:{h:hp@x;*(-1+#x)({(sd[@[x;0,y;:;x@y,0];0;y];y-1)}.)/(h;-1+#x)}
+
+/ binomial heap
+\d hpbl
+
+/ heap insert
+hi:{mg@hi0[x;y]}
+hi0:{(r;p;d;n):x;(r,#p;p,#p;d,0;n,y)}
+
+/ merge heap
+mg:{(r;p;d):-1_x
+    *{$[2>#l:*|x;x
+      ~=/x[0;2;2#l];x
+     (cm[*x;2#l];1_l)]}/(x;@/1(<d@)\r)}
+
+/ combine (trees)
+cm:{(r;p;d;n):x;y:@/1(<n@)\y
+    r:r_r?y@0;p[y@0]:y@1;d[y@1]+:1
+    (r;p;d;n)}
+
+/ heap extract
+hx:{(r;p;d;n):x;m:*>n@r
+    p:@[p;r@m;:;0N]
+    p:@[p;c;:;c:&p=r@m]
+    (n@r@m;mg@(c,r_m;p;d;n))}
+\d .

+ 31 - 0
ngnk-libs/classic-cs/sparse-example.k

@@ -0,0 +1,31 @@
+\d sm
+\l sparse.k
+\d .
+smm:sm.mm   / sparse matrix multiply
+sp:sm.m2sm  / sparsify matrix (SCR format)
+
+sh:-1_#'*:\
+
+`prng@0
+mk:{(x#0)@[;;:;1]'1?/:#/x}
+
+/ random pair of sparse matrices
+(ml;mr):mk' \ds:0N 2#(5+3?15)0 1 1 2
+
+/ total number of elements in full matrix
+*/+ds
+
+/ sparse matrix multiply
+(w;v):smm.(sp'(ml;mr))
+
+/ convert results into a sparse matrix
+sm.spf[v;w;1+*|/w]
+
+/ full multiplication
+:mm:+/'ml*\:mr
+
+/ shape of result matrix
+:d:(sh'(ml;mr))@'!2
+
+/ regression test
+mm~d#@[&*/d;d/'w;:;v]

+ 15 - 0
ngnk-libs/classic-cs/sparse.k

@@ -0,0 +1,15 @@
+spf:{(,/'(x;.y[1]@d)),,+\0,@[&z;!d;:;]@#'x:x@d:=*y:+y}  / sparsify (CSR)
+nnz:{(((,/x)@&,/;+&:)@\:~~x),,#x}                       / non-zero entries and row count
+m2sm:{spf.(nnz@x)}                                      / matrix to sparse matrix
+
+rws:{@[i;(-1_x[2])_i:!#x[0];:;!-1+#x[2]]}    / rows
+ci:{p@\:{x^x^y}/!'p:='(x;y)}                 / common indices
+
+rc:{(rws;@[;1])@\:x}                         / rows/columns
+cx:+,/,/,/:\:'/                              / match elements
+enc:{b:1+|/'x;(b;b/x)}                       / encode
+
+mm:{(l;r):`row`col!/:rc'(x;y)
+    j:*|e:enc@cx(l`row;r`col)@'w:ci[l`col;r`row]
+    c:*/cx((x;y)@\:0)@'w
+    (e[0]\'w;v@w:&~~:v:@[;;+;]/[&1+|/j;j;c])}

+ 60 - 0
ngnk-libs/classic-cs/state-machines/example.k

@@ -0,0 +1,60 @@
+\l parse.k
+
+para:`0:
+para"Make a state machine that parses quoted strings handline escapes"
+para"----------------------------------------------------------------"
+para"First identify the relevant characters and give the classes names."
+
+:G:("\"\\";"QS") / quotes and slashes
+para"Define a table indicating relevant transitions."
+para"The first row and column are labels and the rest are transitions."
+para"By convention the first class is . representing \"everything else\"."
+:Q:(" .QS"
+    "..Q."
+    "QQ.E"
+    "EQQQ")
+
+para"\nParse table to generate the classification table and transition matrix."
+:(C;TT):sm[G;Q]
+
+para"\nSome text:"
+`0:txt:"This is \"text\" with \"quotes\" and an escaped escape \"here\\\\\" but not here: \\\\"
+
+para"\nParse text using config and start state."
+s:1_*+Q
+c:1_*Q
+P:prs[(C;TT);s?"."]
+
+s@P@txt
+
+para""
+para"With the convention that the first class is \"everything else\", you can combine state tables mechanically."
+
+:G:("\"/\\\n ";"QSENW";"..Q...W") / quotes, slashes, newlines and spaces
+
+para"Comments start at slash and end at newlines, but must be preceded by whitespace."
+:C:(" .SNW"
+    "....W"
+    "W.C.W"
+    "CCC.C")
+
+para"To aggregate, need the order of the classes in the aggregate table"
+para"and which states to transition to from the generic class ."
+para"Each sub-state machine declares which classes it handles explicitly."
+para"(i.e. which classes don't default to handling as a generic class.)"
+
+:T:mrg[-2#G;((Q;,"Q");(C;""))]
+
+s:1_*+T
+c:".",G[1]
+para"\nMake a comment:"
+`0:txt:"This is \"text\" with \"quotes\" and an / escaped escape \"here\\\\\" but not here: \\\\"
+
+P:prs[sm[2#G;T];s?"."]
+para"\nParse"
+`0:pp:s@P@txt
+`0:txt
+para""
+`0:,/":",/:+(pp;txt)
+
+para"\nChallenge: Change state machine to include the closing quote."

+ 11 - 0
ngnk-libs/classic-cs/state-machines/parse.k

@@ -0,0 +1,11 @@
+sm:{C:@[&256;x[0];:;1+!#x[0]]                    / create a state machine with character classes
+    (C;((1_*+y)?(1_'1_y))@\:(".",x[1])?1_*y)}    / x and table y
+
+prs:{y{x[y;z]}[x[1]]\x[0]@z}     / parse text z with state machine x and initial value y
+
+spl:{(,x[0]),1_+(+y)@(*y)?x[1]}  / split character classes using a mapping
+
+mrg0:{f:&'"."=r:@/1(&^" ."?*:')\1_z[0]@\:1^(*z[0])?x  / preparse sub-state machine for merging
+      @[;;:;]'[r;f;y@f:f^\:x?z[1]]}
+
+mrg:{cs:" .",x[0];((,cs),,/mrg0[cs;x[1]]'y),,x[1]}

+ 12 - 0
ngnk-libs/compression/README.org

@@ -0,0 +1,12 @@
+* Compression in K
+  This may end up having more stuff, but for now it just has a sketch of an lz4 tool
+
+** LZ4
+   Currently just a sketch of an [[https://github.com/lz4/lz4/tree/dev][lz4]] decompression tool.  See the example file and the usage sketch below.
+
+*** Caveats
+  - Doesn't currently fully process all [[https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md][frame options]] and hence doesn't fully process the frame
+  - The ~dcm~ function evaluates the frame descriptor and then processes the block data
+  - If there is a content checksum then it's just in the leftover data
+  - Content Size is calculated but not used
+  - Block Checksums are simply ignored
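+
+*** Usage sketch
+   A minimal sketch (untested); the byte vector is the second frame from ~lz4example.k~,
+   which was generated from ~echo this is compressed~:
+
+   : \l lz4.k
+   : dcm 0x04224d186440a7130000807468697320697320636f6d707265737365640a000000007bf252f1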

+ 47 - 0
ngnk-libs/compression/lz4.k

@@ -0,0 +1,47 @@
+// decompression
+LZ4HDR:0x184D2204
+islz4:LZ4HDR~|4#
+
+flgs:{(vs;b):0 2_(8#2)\x
+  $[~0 1~vs;`err"unsupported version";]
+  `bi`bc`cs`cc`"_"`did!b}
+bdb:{`"_"`bmx`"_"!2/'0 1 4_(8#2)\x}
+
+frm:{islz4@x
+  (f;b):(flgs;bdb)@'x[4 5]
+  off:8 4*`cs`did#f
+  sz:256/256!|x@7+!off`cs
+  did:x@7+off[`cs]+!off`did
+  ((7++/off)_x;f;b;sz;did)}
+
+blks:{(out;in):y
+  sn:$[x`bi;"";out]
+  $[0x00000000~s:|4#in;out
+    0>*s;(out;""),'(4+0,256/(256!s)-128,3#0)_in
+    [(b;r):(4+0,256/256!s)_in
+     r:(4*x`bc)_r                      / block checksum.  Just drop for now.
+     (out,*(#*|:)f/(sn;b);r)]]}
+
+tk:{1++/&\255=7#x}
+
+f0:{(r;s;l;m):0 0,16 16\**(t;b):0 1_x
+  $[l=15;[(s;b):_/|1(0,tk@)\b;l:+/l,s];]
+  $[m=15;m:+/m,#/r:|1 tk\(2+l)_b;]
+  (l;4+m;256/|b[l+!2];*r;b)}
+
+f:{(l;m;off;r;b):f0[*|x]
+     m*:lst:~l=#b
+     (n,m#(-off)#n:(*x),l#b;(l+r+2*lst)_b)}
+
+dcm:{0 4_'(~0x00000000~4#*|:) blks[*flg]/("";**(data;flg;rest):0 1 2_frm x)}
+
+/
+// compression
+:t:"this his repeats because repeating helps"
+/:t:"haaaaaaaaa\0"
+*{ \(r;s;lb):y
+  r:$[(x-1)<@/i:1(*>:)\(*lb)*~|/'a:z=s[+/lb]
+      r,,((#s)-*i;i:|/'lb@\:*|i);r]
+  (r;s,z;1 0+(0;,&z=s),'(*lb;a)@\:&0<#'a:(*|lb)@'&'a)}[4]/[(();"";(,-1;,,0));t]
++(!#t;t)
+\

+ 13 - 0
ngnk-libs/compression/lz4example.k

@@ -0,0 +1,13 @@
+\l lz4.k
+
+/
+Generated from command line tool as follows:
+$ lz4 -v <(echo cause this has repeats because repeating helps the cause) /tmp/lz4.test
+$ lz4 -v <(echo this is compressed) /tmp/lz4.test
+$ lz4 -v <(echo this is compressed this is compressed this is not compressed) /tmp/lz4.test
+\
+T:( 0x04224d186440a736000000f20a636175736520746869732068617320726570656174732062651900021000f005696e672068656c7073207468652063617573650a00000000e82e5379
+  0x04224d186440a7130000807468697320697320636f6d707265737365640a000000007bf252f1
+  0x04224d186440a724000000ff047468697320697320636f6d7072657373656420130008336e6f742a0050737365640a0000000070d21641)
+
+dcm'T

+ 17 - 0
ngnk-libs/csv/README.org

@@ -0,0 +1,17 @@
+* CSV parser
+  A simple comma separated value parser for [[https://ngn.codeberg.page/][ngn/k]].  Currently it uses hard-coded (easily modified)
+  variables for the escape character and row and field separators.
+
+  It also currently works on the whole file as that's the typical way to interact with files in
+  ngn/k.  It shouldn't be too hard to make it work on blocks of bytes by first splitting on the
+  "true" (i.e. not quoted) row separators and parsing all but the last leaving it (possibly
+  incomplete) row to be prepended to the next chunk.
+
+  Also, it does no transformations on the data whatsoever currently.  It simply splits on rows and
+  fields while respecting quoting.  Data can be cast to appropriate types as post processing,
+  possibly with a well defined API as that [[https://code.kx.com/q/ref/file-text/#load-csv][used by kx]].
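+
+  A usage sketch (untested; the input string is a made-up example):
+
+  : \l csv.k
+  : csv.csv "a,b\n1,\"x,y\""
+  : / expected: (("a";"b");("1";"x,y"))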
+
+** History
+   This was pretty much my first K program ever, though it's gone through a few iterations.  I
+   originally wrote this in [[https://groups.google.com/g/kona-user/c/53T4Rzt_STQ/m/UgkCxVT_CAAJ][kona]].  (You can follow the iterations in that thread.)  Later ColTim
+   [[https://discord.com/channels/821509511977762827/821511172305846322/944305697846341702][punched it up some]] after doing some performance testing on it.

+ 10 - 0
ngnk-libs/csv/csv.k

@@ -0,0 +1,10 @@
+/ ngn/k
+\d csv
+ESC:  "\""    / escape
+ROW:  "\n"    / row separator
+FLD:  ","     / field separator
+
+uq:{$[*q:x=ESC;-1_x@&~q&2!+\q;x]}          / unquote
+sp:{[s;x](1&l)_'(l:0,&(2!+\x=ESC)<x=s)_x}  / split
+csv:{uq''sp[FLD]'sp[ROW;x]}                / parse CSV
+\d .

+ 43 - 0
ngnk-libs/ffi/README.org

@@ -0,0 +1,43 @@
+* Foreign function interface
+  [[https://codeberg.org/ngn/k][ngn/k]] supports a foreign function interface for using functions
+  written in C from within the interpreter.  There is a basic example
+  of such [[https://codeberg.org/ngn/k/src/branch/master/x][an extension]] in the distributed source code, but it's fairly bare.
+
+  The API was designed to be able to share extensions with [[https://github.com/ktye/i][ktye/i]] by
+  supporting a common header file.  (The file [[https://codeberg.org/ngn/k/src/branch/master/k.h][k.h]] in ngn/k refers to
+  the one in ktye/i.)  There are more extensive examples in ktye/i but
+  some small changes are required to accomodate actual differences in
+  the languages.  For instance, ngn/k requires using ~,:~ to indicate
+  the use of monadic enlist rather than dyadic join whereas ktye is
+  able to disambiguate.
+
+  This project provides a makefile and a couple of diffs to make those
+  extensions compatible with ngn/k.  This is mostly for demonstration
+  purposes and to provide more examples of ngn/k's ffi capabilities.
+
+  Note that I built this for OSX and so dynamic libs are ~.dylib~
+  files.  ~make.diff~ is a diff to ngn/k's makefile to accommodate
+  this.  Also, the reference to the libraries points at dylib files.
+  It shouldn't be hard to adapt this for linux.
+
+  ~ext.diff~ is a diff file to the four extensions in ktye/i that were
+  available at the time of writing.  That is ~sqlite~, ~mat~, ~draw~
+  and ~ray~.  You should sync those directories into this project
+  before running ~make~.  Then you should be able to run all of the
+  ~.k~ files with ngn/k from this directory or any directory after
+  properly setting up ~LD_LIBRARY_PATH~.
+
+  This example uses ngn/k's ~2:~ function to dynamically load
+  extensions into a running instance of the interpreter.  Other
+  approaches are possible including linking the extensions into the
+  executable at build time or using ~LD_PRELOAD~ or some similar
+  mechanism to load them into the interpreter at startup.  This last
+  might require a change to call ~kinit()~ from within the "load"
+  function to ensure the interpreter has been properly initialized
+  before loading the extension.
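+
+  For example, the patched ~sqlite.k~ in ~ext.diff~ loads its extension (built as
+  ~libsqll.dylib~ by the makefile here) like this:
+
+  : `"libsqll.dylib"2:(`loadsql;1);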
+
+  Finally, just to hammer the point home, the ffi /makes it possible/
+  to use functions written in C from within the interpreter, but it's
+  not free.  You can't load arbitrary C functions.  You need to write
+  a wrapper to allow the interpreter to interact with the various
+  types.  These files serve as examples of such wrappers.

+ 196 - 0
ngnk-libs/ffi/ext.diff

@@ -0,0 +1,196 @@
+diff --color -ur ktye/i/+/sqlite/sqlite.c sqlite/sqlite.c
+--- ktye/i/+/sqlite/sqlite.c	2024-05-26 20:39:33
++++ sqlite/sqlite.c	2024-06-06 14:16:18
+@@ -49,13 +49,13 @@
+    default:;
+     int nb = sqlite3_column_bytes(res, i);
+     K c = KC((char *)sqlite3_column_blob(res, i), (size_t)nb);
+-    l[i] = Kx(",", l[i], Kx(",", c)); // l,,c
++    l[i] = Kx(",", ref(l[i]), Kx(",:", c)); // l,,c
+    }
+   }
+   row0 = 0;
+  }
+  
+- K t = Kx("+", Kx("!", keys, KL(l, cols)));
++ K t = Kx("+:", Kx("!", keys, KL(l, cols)));
+  free(l);
+  
+  return t;
+@@ -67,10 +67,10 @@
+ static void addTable(sqlite3 *db, K name, K t){ // add k table to sqlite db (https://qastack.com.de/programming/1711631/improve-insert-per-second-performance-of-sqlite)
+  K l2[2]; LK(l2,t);
+  
+- K p = Kx(",", KC0("INSERT INTO "), Kx("$", ref(name))); // p:"insert into tname values(?,?,..)"
++ K p = Kx(",", KC0("INSERT INTO "), Kx("$:", ref(name))); // p:"insert into tname values(?,?,..)"
+  p = Kx(",", p, KC0(" VALUES("));
+  
+- K q = Kx(",", KC0("CREATE TABLE "), Kx("$", name));     // q:"create table tname(col1 type1, col2 type2, ...)"
++ K q = Kx(",", KC0("CREATE TABLE "), Kx("$:", name));     // q:"create table tname(col1 type1, col2 type2, ...)"
+  q = Kx(",", q, KC0("("));
+  
+  size_t nc = NK(l2[0]);
+@@ -79,7 +79,7 @@
+  
+  for(int i=0;i<nc;i++){
+   p = Kx(",", p, Kc('?'));
+-  q = Kx(",", q, Kx("$", Kx("@", ref(l2[0]), Ki(i))));
++  q = Kx(",", q, Kx("$:", Kx("@", ref(l2[0]), Ki(i))));
+   K ty;
+   switch(TK(cols[i])){
+   case 'I':  ty = KC0(" INTEGER"); break;
+@@ -113,7 +113,7 @@
+    case 'I': sqlite3_bind_int(stmt, 1+i, iK(v)); break;
+    case 'F': sqlite3_bind_double(stmt, 1+i, fK(v)); break;
+    case 'S':;
+-    K s = Kx("$", v);
++    K s = Kx("$:", v);
+     sqlite3_bind_text(stmt, 1+i, dK(s), NK(s), NULL);
+     unref(s);
+     break;
+@@ -176,7 +176,7 @@
+  size_t n= NK(names);
+  K *l = malloc(sizeof(K)*n);
+  K q = KC("select * from ", 14);
+- for(int i=0;i<n;i++) l[i] = getTable(db, Kx(",", ref(q), Kx("$", Kx("@", ref(names), Ki(i))))); // q,$names@i
++ for(int i=0;i<n;i++) l[i] = getTable(db, Kx(",", ref(q), Kx("$:", Kx("@", ref(names), Ki(i))))); // q,$names@i
+  K r = Kx("!", names, KL(l, n));
+  unref(q);
+  free(l);
+@@ -232,9 +232,9 @@
+  return r;
+ }
+ 
++__attribute__((constructor)) void loadsql();
+ 
+ void loadsql() {
+-  exit(-1);
+  KR("sqlite", (void*)sqlite, 1);
+  KR("sqlq",   (void*)sqlq,   2);
+ }
+diff --color -ur ktye/i/+/sqlite/sqlite.k sqlite/sqlite.k
+--- ktye/i/+/sqlite/sqlite.k	2024-01-11 10:40:45
++++ sqlite/sqlite.k	2024-06-06 14:05:07
+@@ -1,7 +1,9 @@
++`"libsqll.dylib"2:(`loadsql;1);
++
+ t:+`a`b`c`d!(1 2;3 4.;`abc`def;("alpha";"beta"))
+ c:sqlite `t!,t
+-s:sqlq[c;"select sum(b) as M from t"]
++/s:sqlq[c;"select sum(b) as M from t"]
+ T:sqlite c
+-
+- \"query ",$$[7.~*s`M;`ok;`fail]
+- \"roundtrip ",$$[T~`t!,t;`ok;`fail]
++/
++ "query ",$$[7.~*s`M;`ok;`fail]
++ "roundtrip ",$$[T~`t!,t;`ok;`fail]
+diff --color -ur ktye/i/+/mat/mat.c mat/mat.c
+--- ktye/i/+/mat/mat.c	2024-01-11 10:40:45
++++ mat/mat.c	2024-06-06 12:50:50
+@@ -1,6 +1,6 @@
+ #include<stdlib.h>
+ #include<string.h>
+-#include"../k.h"
++#include"k.h"
+ 
+ // dgesv solve linear system (real)
+ //  x: L columns (input matrix)
+@@ -351,6 +351,8 @@
+ K dgesvD(K x){ return svd(x, 1, 'S'); }
+ K zgesvd(K x){ return svd(x, 2, 'N'); }
+ K zgesvD(K x){ return svd(x, 2, 'S'); }
++
++__attribute__((constructor)) void loadmat();
+ 
+ void loadmat(){
+  KR("dgesv", (void*)dgesv, 2);
+diff --color -ur ktye/i/+/mat/mat.k mat/mat.k
+--- ktye/i/+/mat/mat.k	2024-01-11 10:40:45
++++ mat/mat.k	2024-06-06 12:59:19
+@@ -1,9 +1,10 @@
+ /lapack tests
++`"libmat.dylib" 2:(`loadmat;1);
+ 
+-test:{x;y;$[x<y;"ok ";"fail "]," "/:$(x;"<";y)}
+-
+-FZ:{`f@'x} /ktye convert matrix z to f
+-fz:{`f@x } /             vector
++test:{x;y;$[x<y;"ok ";"fail "]," "/$(x;"<";y)}
++abs:{x|-x}
++FZ:{`f$'x} /ktye convert matrix z to f
++fz:{`f$x } /             vector
+ 
+ /dgesv
+ A:+(6.80 -6.05 -0.45 8.32 -9.67
+diff --color -ur ktye/i/+/draw/align.k draw/align.k
+--- ktye/i/+/draw/align.k	2024-01-11 10:40:45
++++ draw/align.k	2024-06-06 14:37:56
+@@ -1,3 +1,5 @@
++`"libdraw.dylib"2:(`loaddrw;1);
++`"libray.dylib"2:(`loadray;1);
+ 
+ t:{(`color;255;    `Circle;x,4;`color;0;`text;(x;y;"Abcg",$y))}
+ T:{(`color;255*256;`Circle;x,4;`color;0;`Text;(x;y;"Abcg",$y))}
+diff --color -ur ktye/i/+/draw/clip.k draw/clip.k
+--- ktye/i/+/draw/clip.k	2024-01-11 10:40:45
++++ draw/clip.k	2024-06-06 14:37:52
+@@ -1,3 +1,6 @@
++`"libdraw.dylib"2:(`loaddrw;1);
++`"libray.dylib"2:(`loadray;1);
++
+ /canvas shows 3 (clipped) black circles
+ /cairo 1.17.2 shows only 1
+ 
+diff --color -ur ktye/i/+/draw/draw.c draw/draw.c
+--- ktye/i/+/draw/draw.c	2024-01-11 10:40:45
++++ draw/draw.c	2024-06-06 12:50:17
+@@ -1,7 +1,7 @@
+ #include<stdlib.h>
+ #include<string.h>
+ #include<cairo.h>
+-#include"../k.h"
++#include"k.h"
+ 
+ #include<stdio.h>
+ 
+@@ -290,6 +290,7 @@
+ static void rgb24(uint32_t *u, size_t n){ for(int i=0;i<n;i++) u[i] = ((u[i]&0xff)<<16) | ((u[i]&0xff0000)>>16) | u[i]&0xff00; }
+ 
+ 
++__attribute__((constructor)) void loaddrw();
+ 
+ void loaddrw(){
+  drawcmds = Kx("`color`font`linewidth`rect`Rect`circle`Circle`clip`line`poly`Poly`text`Text");
+diff --color -ur ktye/i/+/draw/draw.k draw/draw.k
+--- ktye/i/+/draw/draw.k	2024-01-11 10:40:45
++++ draw/draw.k	2024-06-06 14:32:52
+@@ -1,3 +1,5 @@
++`"libdraw.dylib"2:(`loaddrw;1);
++`"libray.dylib"2:(`loadray;1);
+ d:(`color;123;`Rect;0 0 100 50)
+ m:draw[d;100 50]
+ 
+diff --color -ur ktye/i/+/ray/ray.c ray/ray.c
+--- ktye/i/+/ray/ray.c	2024-01-11 10:40:45
++++ ray/ray.c	2024-06-06 14:35:53
+@@ -18,7 +18,7 @@
+ #include<string.h>
+ #include"raylib.h"
+ #include"rgestures.h"
+-#include"../k.h"
++#include"k.h"
+ 
+ 
+ K png(K); // ../img/img.c
+@@ -131,7 +131,9 @@
+ }
+ 
+ //show(50;10000#255) /red window 100x50
+-K show(K x){ Show(x, Ki(0), Ki(0)); }
++K show(K x){ Show(x, Ki(0), Ki(0)); return Ki(0); }
++
++__attribute__((constructor)) void loadray();
+ 
+ void loadray(){
+ 	KR("show", (void*)show, 1); // show image from data or png

+ 23 - 0
ngnk-libs/ffi/make.diff

@@ -0,0 +1,23 @@
+diff --git a/makefile b/makefile
+index bd93acfa..e76d84c6 100644
+--- a/makefile
++++ b/makefile
+@@ -1,8 +1,9 @@
+ MAKE:=$(MAKE) MAKEFLAGS=-j8
+ M=mkdir -p $(@D)
+ 0:;$(MAKE) k && $(MAKE) t #default target
+-k:;$(MAKE) a N=$@ R=k O='-O3 -march=native' L='-lm -ldl' #for calling k from dynamic libs, add: L='... -Wl,-E'
+-libk.so:;$(MAKE) a N=$@ R=$@ O='-fPIC -Dshared -fvisibility=hidden' L='-lm -ldl -shared'
++k:;$(MAKE) a N=$@ R=k O='-g -march=native' L='-lm -ldl' #for calling k from dynamic libs, add: L='... -Wl,-E'
++libk.so:;$(MAKE) a N=$@ R=$@ O='-g -fPIC -Dshared -fvisibility=hidden' L='-lm -ldl -shared'
++libk.dylib:;$(MAKE) a N=$@ R=$@ O='-fPIC -Dshared -fvisibility=hidden' L='-lm -ldl -dynamiclib -install_name $@'
+ libk.a:;$(MAKE) b N=$@ R=$@ O='-O3 -march=native -ffreestanding -lm -ldl -Dldstatic'
+ o/$N/%.o:%.c *.h;$M;$(CC) @opts $O -o $@ -c $<
+ o/$N/bin:$(patsubst %.c,o/$N/%.o,$(wildcard *.c));$(CC) $O -o $@ $^ @lopts $L # ;$(STRIP) -R .comment $@ -R '.note*'
+@@ -54,5 +55,5 @@ t21:k;l/runparts.k aoc/21  ;echo t21 done
+ t22:k;l/runparts.k aoc/22  ;echo t22 done
+ t23:k;l/runparts.k aoc/23  ;echo t23 done
+ 
+-c:;rm -rf o k libk.so libk.a #clean
++c:;rm -rf o k libk.dylib libk.a #clean
+ .PHONY: 0 c k w h a t tu td tg te te0 te1 te2 ta t15 t16 t17 t18 t19 t20 t21 t22 t23

+ 21 - 0
ngnk-libs/ffi/makefile

@@ -0,0 +1,21 @@
+K=${HOME}/K/ngnk/k
+RAYLIB=${HOME}/raylib/raylib/src
+OPTINC=/opt/local/include
+OPTLIB=/opt/local/lib
+
+0:libsqll.dylib libmat.dylib libdraw.dylib libray.dylib
+
+libsqll.dylib:sqlite/sqlite.c
+	$(CC) -I$K -L$K -I${OPTINC} -L${OPTLIB} -dynamiclib -install_name $@ -g -fPIC $^ -o $@ -undefined dynamic_lookup -lsqlite3
+
+libmat.dylib:mat/mat.c
+	$(CC) -I$K -L$K -L${OPTLIB}/lapack -rpath ${OPTLIB}/gcc12 -dynamiclib -install_name $@ -g -fPIC $^ -o $@ -undefined dynamic_lookup -llapacke
+
+libdraw.dylib:draw/draw.c
+	$(CC) -I$K -I${OPTINC}/cairo -L$K -L${OPTLIB} -dynamiclib -install_name $@ -g -fPIC $^ -o $@ -undefined dynamic_lookup -lcairo
+
+libray.dylib:ray/ray.c
+	$(CC) -framework CoreVideo -framework IOKit -framework Cocoa -framework GLUT -framework OpenGL -I$K -I${RAYLIB} -L$K -L${RAYLIB} -dynamiclib -install_name $@ -g -fPIC $^ -o $@ -undefined dynamic_lookup -lraylib
+
+c:
+	rm -rf libsqll* libmat* libdraw* libray*

+ 11 - 0
ngnk-libs/image/error-diffusion-example.k

@@ -0,0 +1,11 @@
+\l error-diffusion.k
+
+/ grey scale pbm with range 0 255
+/ This image is taken from [[https://georgeclinton.com/family/sylvester-stewart/]] and
+/ converted to a pnm using [[https://imagemagick.org]]
+
+img:.'3_0:"Sylvester-Stewart.pnm"
+
+"img.pnm"0:{s:-1_#'*:\x;("P1";" "/$s)," "/'$~255=ediff[setup[0 255;specs[y]]]@x}[img;"burkes"]
+
+`0:"done"

+ 59 - 0
ngnk-libs/image/error-diffusion.k

@@ -0,0 +1,59 @@
+pk:{x@*<{x|-x}x-y}
+
+pre:{+/y*0^z@(!#z)+/:|x}
+pres:{g:+g@/:\:!g:(x[0;1];x[1])@\:=**x;({+/y*0^z@x-1}.*g;{+/x@'0^y@!#x}[pre.'1_g])}
+fldrow:{(e,*y;(*|y),'(e:z-n;n:x[0]@z:z+x[1][*y]%x[3]))}
+pr:{fldrow[x]/[(,0.0;(!0;!0)); z+x[2][y]%x[3]]}
+ediff:{|*|{(,'*|pr[x;*y;z]),'y}[x]/[(,&#*y;0#,!0);y]}
+
+setup:{l:(w:&0<y[1])-*+&^y[1];f:.[y[1];]'+w;pk[x],pres[(l;f)], y[0]}
+
+specs:!/+(("floyd-steinberg"
+  (16;(0  0N  7
+       3  5   1)))
+ ("sierra"
+  (32;(0   0   0N  5   3
+       2   4   5   4   2
+       0   2   3   2   0)))
+ ("sierraTwo"
+  (16;(0   0   0N  4   3
+       1   2   3   2   1)))
+ ("sierraLite"
+  (4;(0   0N  2
+      1   1   0)))
+ ("burkes"
+  (32;(0   0   0N  8   4
+       2   4   8   4   2)))
+ ("stucki"
+  (42;(0   0   0N  8   4
+       2   4   8   4   2
+       1   2   4   2   1)))
+ ("jarvis"
+  (48;(0   0   0N  7   5
+       3   5   7   5   3
+       1   3   5   3   1)))
+  ("atkinson"
+   (8;(0   0N  1   1
+       1   1   1   0
+       0   1   0   0))))
+
+/
+
+Inspired by
+[[https://gist.github.com/0racle/f48586b6b2c05d13e3abbcb046e91dfb][0racle]]
+this is an implementation of several error diffusion dithering
+algorithms.
+
+Each spec is a divisor along with a distribution of the errors
+relative to the location of 0N.
+
+Each row is then scanned, collecting both output and errors
+(i.e. difference between the calculated input and the output).  Error
+accumulated from a single previous row is calculated by pre.  The
+function pres returns both a function which collects error for the
+current row and one which aggregates the calculated accumulated error
+for each previous row.
+
+The behavior of pres changes depending on the spec and is configured
+with setup.  The first argument to setup is the range of output values;
+the second is the spec, which is parsed to produce pre and pres.
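+
+For example (following error-diffusion-example.k, where img is a greyscale
+matrix with values in the range 0-255):
+
+  ediff[setup[0 255;specs["burkes"]]]@img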

+ 8 - 0
ngnk-libs/image/ordered-dither-example.k

@@ -0,0 +1,8 @@
+\l ordered-dither.k
+
+/
+img:.'3_0:pnm:"Sylvester-Stewart.pnm"
+
+"ord.pnm"0:("P1";"330 330")," "/'$out:dth[mtx 2;img%pnm[2]]
+
+`0:done

+ 10 - 0
ngnk-libs/image/ordered-dither.k

@@ -0,0 +1,10 @@
+sh:-1_#'*:\
+
+B:(0 2;3 1)
+mtx:{(*/(x+1)#4;x{,/,'/'x+\:\:4*y}[B]/B)} / (factor;Bayer matrix)
+
+dth:{(f;m):x;(h;w):sh y;((h#w#/:m)%f)>y}
+
+/
+
+[[https://en.wikipedia.org/wiki/Ordered_dithering]]

+ 25 - 0
ngnk-libs/ipc/json-rpc-example.k

@@ -0,0 +1,25 @@
+\l json-rpc.k
+para:`0:
+
+para"register function sum: +/"
+sum:+/
+para""
+
+para"global id counter"
+id:0
+para""
+
+para"client asks"
+ask:{req["sum";id+:1;2 3 4]}
+para""
+
+para"create message"
+m:`j@r:!/+ask[]
+para@m
+para""
+
+r"id"
+
+para"answer:"
+m:`j@!/+recv@m
+para@m

+ 40 - 0
ngnk-libs/ipc/json-rpc.k

@@ -0,0 +1,40 @@
+\d json
+\l json.k
+\d .
+
+ec:!/+((`ParseError          ;-32700)
+       (`InvalidRequest      ;-32600)
+       (`MethodNotFound      ;-32601)
+       (`InvalidParams       ;-32602)
+       (`InternalError       ;-32603)
+       (`ServerNotInitialized;-32002)
+       (`UnknownErrorCode    ;-32001))
+
+J:("jsonrpc";"2.0")
+not:{(J;("method";x)),$[#y;,("params";y);()]}
+req:{not[x;z],,("id";_y)}
+rpl:{(J;("id";_x);("result";y))}
+err:{(J;("id";_x);("error";!/+(,("code";y)),z))}
+
+pe:{err[`null;ec`ParseError;,("message";"json parse error")]}
+msg:{x}
+
+ie:{err[(x["id"];`null)[y];ec`InvalidRequest;,("message";"invalid request")]}
+
+/ accommodate the parser by forcing symbols and flooring ints
+vld:{$[~*c:~^(!x)?("jsonrpc";"id";"method");0
+       (~c[2])|~`C~@x"jsonrpc";0
+       c[1]&^`i`C`s?t:@x"id";0
+       c[1]&(`s~t)&~`null~x"id";0;1]}
+
+err0:{`err@"WHOOPS",`k@x}
+
+LKUP:(,"dummy")!{}
+
+nme:{z;err[(x"id";`null)@*^(!x)?,"id";ec`MethodNotFound;,("message";"method \"",(x"method"),"\" not found")]}
+
+lkup:{$[mth:.[.:;`$x"method";{::}];mth;~^(!LKUP)?x"method";LKUP@x"method";nme[x]]}
+hdl:{$[~vld[x];:ie[x;*^(!x)?,"id"];];res:(lkup[x])[x"params"];$[^*(!x)?,"id";res;rpl[x"id";res]]}
+
+/ Maybe wrap in try and return `InternalError on errors that bubble up?
+recv:{$[(::)~r:.[json.prs;,x;{::}];pe[x];~/1_:\@r;msg@,hdl@r;msg@hdl'r]}

+ 41 - 0
ngnk-libs/json/README.org

@@ -0,0 +1,41 @@
+* JSON parser
+  [[https://ngn.codeberg.page/][ngn/k]] has a built in [[https://www.json.org/json-en.html][json]] parser which is many times faster than this.  So why have one written in K?
+
+  First, K is interpreted and so it's fundamentally easier to modify.  Also, since K objects don't
+  match 1-1 with json objects (no symbols in json, no distinct boolean types in K, etc.) there are
+  necessarily choices made in any implementation of a parser.
+
+** API
+   All functions are created under the ~json~ namespace.  The primary API points are ~tree~ and
+   ~prs~.  ~prs~ takes a string and returns a K "object".  That is to say if the toplevel json
+   structure is a list ~prs~ returns a list and if it's an "object" ~prs~ returns a dict.
+
+   ~tree~ returns a list of three items: the nodes, the parent vector and the left vector.  This
+   amounts to an array representation of a tree, which I've taken to calling [[https://github.com/tlack/atree][Apter trees]] though I
+   haven't done any scholarship as to the actual origins.  There's likely more information available
+   in [[https://scholarworks.iu.edu/dspace/handle/2022/24749][Aaron Hsu's thesis]], but I haven't been through it.
+
+   Most people will probably make use of the ~prs~ interface, but having the ~tree~ available opens
+   up possibilities for alternative traversals.
+
+   From the tree representation, the nodes of interest will be ~"{"~, ~"["~ and ~":"~, which serve
+   as root nodes for /objects/, /lists/ and /pairs/ where an object is essentially just a list of
+   pairs.  From any object node, you can use the parent vector to find an array of pairs within it.
+   You can use the left vector to discover the default ordering of such pairs.
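+
+   A usage sketch (untested; the input string is a made-up example, and it assumes the file
+   has been loaded under the ~json~ namespace as in ~../ipc/json-rpc.k~):
+
+   : json.prs "{\"a\":[1,2],\"b\":\"c\"}"
+   : / expected: a dict with the string keys "a" and "b"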
+
+** Downside
+   As mentioned above it's quite a bit slower than the built-in json parser.  When I last checked it
+   was a couple of orders of magnitude slower for reasonably sized json files and quite a bit slower for much
+   larger files.  This last is because it doesn't handle memory terribly efficiently.  If possible,
+   you should break up very large json files into smaller chunks for processing.
+
+   Actually, it should be possible to turn this into a version which operates on the file in chunks,
+   but I haven't done any work in this direction.  Another possibility is to generate the tree
+   without doing any split at all.
+
+** References
+   The version of ~pr~ here was offered by [[https://github.com/mlochbaum][mlochbaum]] as discussed on
+   the [[http://www.jsoftware.com/pipermail/programming/2021-May/058263.html][J mailing list]].  It was quite a bit faster than the naive version I had originally.  ngn also
+   [[https://discord.com/channels/821509511977762827/821511172305846322/999258528898359296][offered a fast version]] which has the potential to be more useful for a chunked version, since it
+   uses a fold which can be resumed.  From that thread you can see contributions by @ovs and
+   @chrispsn to make other speed enhancements over the original version.

+ 45 - 0
ngnk-libs/json/json.k

@@ -0,0 +1,45 @@
+T:1_4\0xe74717
+ws:~^"\t\n\r "?
+
+/ make sure that all the brackets match
+chkb:{$[(,2)~?(2!!#:)#-':,/x@@[y;]#'=z;;`err@"mismatched brackets"]}
+
+/ make sure that keys to dictionaries are strings
+chkds:{$[~&/1,"\""=x@-1+&":"=x;`err@"keys are strings";]} / ")]} emacs :(
+
+/ pairs have objects as parents
+/ children of objects are pairs
+chko:{$[(~&/1,"{"=x@y@&":"=x)&&/1,":"=x@y@(~:)_&"{"=x@y;`err@"malformed object";]}
+
+pr:{(g;p):0 2\<((2*#x)#!2)+{2}#x;f:&~p;(g@(|\p*!#p)@f)@<g@f};dp:{+/~~,/(x@)\,!#x}
+
+prn:{p:pr@z@y;@[p;;:;].+.{$[~#c:&":"=x[y]@z;(();())
+                            ~1~#?3!c;`err@"malformed object"
+                            z@(c+\:1 2;c)]}[x;y]'=p}
+
+trm:{x:"[",x;(-/1(ws@x@)\)/y}
+tree:{ $[~#x:trm[x;#x]#x;:(!0;!0);]
+       m:{$[~^x?3;`err@"stray escape";x>0]}@0{T[x;y]}\2^"\\\""?x
+       $[~#s:(+/1(ws@x@)\)/&|':m<|/w:"{[]},:"=\:x;:(0;(,0)!trm[x;#x]);]
+       d:(-|/2#w)++\+/1 1 -1 -1*w:4#w>\:m
+       chkb[x;|/w;d]
+       chkds[x s]
+       end:!/|+1_,':s,#x
+       s:@[s;|w;:;s@w:-1 0+\:&":"=x@s]
+       s@:&~|/",]}"=\:x@s
+       chko[x s;p:prn[x;s;d]]
+       (p;trm[x]'s#end) }
+
+hx:16/+/1(-39*~:10>)\-48+_: / to hex
+u8:{`c$(0x00c0e0f0[c],c#128)+(0,64+&c:1+128 2048 65536'x)\x} /utf8 encode
+L:("true";"false";"null");E:`c$"\"\\/bfnrt"!(34 92 47 8 12 10 13 9)
+de:{,/{i:&<\"\\"=s:" ",x;s:@[s;ii@j;:;w@j:&~^w:E@s@ii:1+i];,/1_'(0,i)_s}'{$["\\u"~2#x;(u8@hx@2_6#x),6_x;x]}'_[;x]0,@/1(&"u"=x@1+)\&<\"\\"=x}
+
+tkn:{x@+/(!--)\(!y;.y)@\:z}
+leaf:{$["\""~*t:tkn[x;y;z];de@1_-1_t;~&/^(`c$"-","0"+!10)?*t;._t;~^L?t;`$t;t]} / "]} emacs :(
+
+prs:{t:tree[x];{$[~^":["?t:x[!y[1]]@z; o[x;y]'((y[0][z])^z)^0N
+                 "{"~t; !/+o[x;y]'((y[0][z])^z)^0N
+                 leaf[x;y[1];z]]}[x;(=t[0];t[1]);0]}
+
+dmp:{$[~^`c`C?t:@x;`k@x;~~/1_:\t;(","/o'x)/"[]";`m~t;(","/":"/'+o''(!x;.x))/"{}";~^`i`f?t;$x;|/(`$L)~\:x;$x]}

+ 31 - 0
ngnk-libs/parsing/earley-example.k

@@ -0,0 +1,31 @@
+\l earley.k
+
+para:`0:
+T:(("S:aS|b";"aab")
+   ("e:NNe|Ne|";"NNN"))
+
+para"parse grammar \"",p[0], "\" with input \"",((p:T 1)1),"\""
+para""
+
+(d;t):rec. p
+para"result table"
+t
+para""
+
+para"result internal data"
+d
+para""
+
+(rules;itms):dsp[d;t]
+para"List of productions"
+rules
+para""
+
+para"Table of items"
+`0:,/'$/|1(2+|/#',/)\($!#itms),'((-1_d[`i]),"-"),'itms
+para""
+
+para"trees"
+`0:`k'trees[d;t;itms]
+para""
+para"More explanation to come ..."

+ 51 - 0
ngnk-libs/parsing/earley.k

@@ -0,0 +1,51 @@
+/ Inspired by [[https://codeberg.org/effbiae/earley/src/branch/main]]
+/ Well written tutorial here: [[https://loup-vaillant.fr/tutorials/earley-parsing/]]
+
+/ Earley item: (rule; dot position; origin; id)
+empty:3#,!0
+id:{(1+#'x`p`i`i)/y}
+dot:{@[x[`t]@n;&x[`p;1+y[0]]=n:1+y[1]+x[`p;y[0]];:;" "]}
+aug:{,/1((id[x];dot[x])@\:)\y}
+data:{R:,/',/+'((*:;1_"|"\"|",)@'":"\)'" "\x
+     `p`t`i`bp!(+\0,#'R;,/R;y,"\0";(,0N 0N)!,())}
+
+pred:{[d;s;c;t]w:&(-1_d[`t]d[`p])=/:?c;(s;++(*|w;0;s+&#*w);!0)}
+scan:{[d;s;c;t]w:&d[`i;s]=/:c;(s+1;@[3#t[s]@\:w;1;+;1];,'s,/:w)}
+comp:{[d;s;c;t]$[~#c:&^c;:(s;empty;!0);]
+      f:(f@;::)@'w:&(d[`t;d[`p;t[s;0;c]]])=t[;4]f:t[s;2;c]
+      (s;{[x;y;z;w]y,'@[3#x[z;;w];1;+;1]}[t]/[empty].f;+(+f;s,/:c@w 0))}
+
+stp:{add[y 0;y 1]@[r:z[y 0;x;y[1;x;4];y 1];1;aug[y 0]]}
+add:{n:(z[1])@\:&((!#i)=i?i)&w:^(y[z 0;3])?i:z[1;3]
+     $[~#*|nxt:(+(z[0];i); z[2]);;x[`bp]:{@[x;y;?,;,z]}/[x`bp].nxt]
+     (x;@[y;z 0;,';n])}
+
+rec0:{{(d;st;s):$[n:#**|x 1;(x[0];x[1],,empty,(!0;"");1+x 2);:x]
+       ({y stp[x]/(pred;scan;comp)}[s]/(d;st)),s}/(x;,aug[x;y];-1)}
+rec1:{s:++(&=/1*:\x[`t]x`p;0;0);rec0[x;s]}
+rec:{-1_rec1 data[x;y]}
+
+cmbos:{!/:/({y,x[y]?z}[x].'!y;1_'(,0N){,/x,/:\:,'y}/y)}
+
+dsp:{[d;t]rules:{?[x[`t]y+!z;1;":"]}[d].'+-1 1_'1(-':)\d[`p]
+     itms:{x[z;1]{?[y;2+x;"."]}'y@x[z;0]}[t;rules]'!-1+#t
+     (rules;itms,''"/",/:/:$-1_t[;2])}
+
+tree:{$[^*z;:0N;]
+      ls:.[y[0];]'-1_c:1_|(*x@)\z
+      (y[1]. z;(c~'0N)'[c:o[x;y]'*'1_'x@1_c;ls])}
+
+trees:{?,/(cmbos[y[;3]]x[`bp]_0N 0N)tree[;(y[;4];z);]/:\:lst,/:&~y[lst:-2+#y;2]}
+
+apt:{(dp;nd):+{$[~`A~@y;,(x;y);(,(x;*y)),,/o[x+1]'y 1]}[0;x];(nd;tree.p@dp)}
+
+/
+/ back pointers:
+/ gives all possible choices of back pointers
+
+cmbos[r[1;;3]]r[0;`bp]_0N 0N
+
+/ display
+
+dsp[d;t]
+tree[bp;start]

+ 330 - 0
ngnk-libs/parsing/earley.org

@@ -0,0 +1,330 @@
+* Earley parsing (first draft)
+  [[https://en.wikipedia.org/wiki/Earley_parser][Earley parsing]] is a method for parsing strings according to a given grammar.  It
+  accomplishes something similar to other parsing algorithms like the [[https://en.wikipedia.org/wiki/LR_parser][LR]] and [[https://en.wikipedia.org/wiki/LL_parser][LL]] families
+  of parsers but with a wider range of applicability at the cost of some performance.
+
+  We'll go into some detail below about how they work but spend most of our time talking
+  about implementing this algorithm in an [[https://en.wikipedia.org/wiki/Array_programming][array language]], specifically [[https://ngn.codeberg.page][ngn/k]].
+
+  There are obviously [[https://loup-vaillant.fr/tutorials/earley-parsing/][other well-written expositions]] on the topic worth pursuing if your
+  interest is piqued.
+
+** Acknowledgments
+  I first became aware of Earley parsers when @effbiae [[https://codeberg.org/effbiae/earley][posted his]] on an [[https://matrix.to/#/#ngnk:matrix.org][ngn/k chat board]].
+  My version was inspired by his.
+
+** How it works
+  The goal is to take an expression like ~7+(2+3)*9~ and to turn it into a parse tree
+  such as the one below.
+
+  :                         ┬
+  :                        exp
+  :                         │
+  :                         +
+  :         ┌───────────────┴────────────────┐
+  :         7                                *
+  :                               ┌──────────┴───────────┐
+  :                              grp                     9
+  :                               │
+  :                               +
+  :                           ┌───┴────┐
+  :                           2        3
+
+  Here, we have a tree rooted at the entire expression with sub nodes for grouping and
+  each operation.
+
+  The basic idea of an Earley parser is to attack the problem with [[https://en.wikipedia.org/wiki/Dynamic_programming][dynamic programming]].
+  That is to recursively break the larger problem down to solving similar smaller problems
+  and assembling them into results.  In this case that amounts to breaking down the
+  parsing of the full expression into the parsing of subexpressions.  The neat part is
+  that the larger problem and the smaller problems are all handled /simultaneously/ during
+  a single pass of the input.
+
+  As many dynamic programming problems are handled with tables, using an array language
+  for this task feels like a good fit.
+
+  We'll be using the following grammar taken from [[https://loup-vaillant.fr/tutorials/earley-parsing/][@Loup's excellent tutorial]].
+
+  : Sum     -> Sum     [+-] Product
+  : Sum     -> Product
+  : Product -> Product [*/] Factor
+  : Product -> Factor
+  : Factor  -> '(' Sum ')'
+  : Factor  -> Number
+  : Number  -> [0-9] Number
+  : Number  -> [0-9]
+
+  Actually, we'll simplify it a bit by replacing all numbers with a single terminal
+  representing their role in the grammar and only handle addition and multiplication.  The
+  first is usually handled by a lexing stage which generates tokens having both a type and
+  a value and only considering the type during parsing.  As is traditional in array
+  language we'll also be parsimonious about the length of our token names.
+
+  : e:s|p
+  : s:p|s+p
+  : p:f|p*f
+  : f:(s)
+  : f:N
+
+  In this grammar we use the convention that ~:~ is used to separate a non-terminal from
+  its production, ~|~ is used to separate alternative productions, non-terminals are
+  lower-case letters and anything else is a terminal character.  So ~+~ and ~*~ are
+  terminals but so is ~N~.  To further simplify the discussion let's split out
+  alternatives into separate productions without alternatives.
+
+  : e:s
+  : e:p
+  : s:p
+  : s:s+p
+  : p:f
+  : p:p*f
+  : f:(s)
+  : f:N
+
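+  With the rule and input encoding used by ~earley.k~ (see ~earley-example.k~), this grammar
+  and the input used in the table below could be run roughly as follows; this is a sketch and
+  the exact strings are assumptions:
+
+  : (d;t):rec["e:s|p s:p|s+p p:f|p*f f:(s)|N";"N+(N+N)*N"]
+  : (rules;itms):dsp[d;t]
+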
+  Let's jump to the end for a bit to have a better look at what we're shooting for.
+
+  : Table of items
+  : 0         N         e:.s/0    e:.p/0    s:.p/0    s:.s+p/0  p:.f/0    p:.p*f/0  f:.(s)/0  f:.N/0
+  : 1         +         f:N./0    p:f./0    e:p./0    s:p./0    p:p.*f/0  e:s./0    s:s.+p/0
+  : 2         (         s:s+.p/0  p:.f/2    p:.p*f/2  f:.(s)/2  f:.N/2
+  : 3         N         f:(.s)/2  s:.p/3    s:.s+p/3  p:.f/3    p:.p*f/3  f:.(s)/3  f:.N/3
+  : 4         +         f:N./3    p:f./3    s:p./3    p:p.*f/3  f:(s.)/2  s:s.+p/3
+  : 5         N         s:s+.p/3  p:.f/5    p:.p*f/5  f:.(s)/5  f:.N/5
+  : 6         )         f:N./5    p:f./5    s:s+p./3  p:p.*f/5  f:(s.)/2  s:s.+p/3
+  : 7         *         f:(s)./2  p:f./2    s:s+p./0  p:p.*f/2  e:s./0    s:s.+p/0
+  : 8         N         p:p*.f/2  f:.(s)/8  f:.N/8
+  : 9         -         f:N./8    p:p*f./2  s:s+p./0  p:p.*f/2  e:s./0    s:s.+p/0
+
+  This is the dynamic programming table we're trying to produce.  Let's break it down.
+
+  As we said, we're going to do a single pass of the input.  The first column is how much
+  of the input we've completed considering, the second column is the input item being
+  considered for that row and the rest tracks how much of each subtask we've completed
+  with what are called "Earley items".
+
+  Each Earley item is a production rule with a couple of extra decorations.  The first is
+  a dot to indicate how much of this production we've seen in the input.  So the very
+  first Earley item in the first row is ~e:.s/0~ which corresponds to the production
+  ~e:s~.  The dot here indicates that this subtask hasn't parsed any of its input up to
+  this point.  Looking at row ~8~ the first item in that row is ~p:p*.f/2~ which indicates
+  that this subtask has parsed the input so far as a non-terminal ~p~ and a terminal ~*~,
+  but is still waiting to parse the non-terminal ~f~ in the input to come.
+
+  The final decoration is a trailing integer following a slash which indicates from which
+  stage this particular subtask was originally spawned.  This will make more sense in a
+  second when we describe where these Earley items come from.
+
+  The algorithm starts by creating Earley items for each production of our starting
+  non-terminal, with "dot position" set to zero.  I.e. nothing has been completed for
+  these initial Earley items.  So starting out the items are the following
+
+  : e:.s/0    e:.p/0
+
+  Now to make progress we spawn Earley items repeatedly until we have consumed all the
+  input.  If we fail to consume all the input, then this is an incomplete parse.
+  Similarly, if we consume all the input but don't have any completed Earley items
+  (i.e. items for producing our start symbol whose dot positions are at the very end) then
+  we have an incomplete parse.  Thus for starters our algorithm will determine if there is
+  a complete parse of the input.  If we stop here we have an Earley /recognizer/ which
+  seeks to do this and no more.  A /parser/ will produce a tree showing which steps lead
+  to a complete parse.
+
+  We spawn Earley items in stages, making as much progress we can with the input so far
+  before moving on.  There are three ways to spawn subtasks which move us forward.  If the
+  next thing for us to complete is a non-terminal, (i.e. the thing immediately after our
+  dot position is a non-terminal), we can spawn a new task which attempts produce that
+  non-terminal starting with the current input.  If the token after our dot is a terminal
+  object which matches the current input then we make a new task which advances our dot
+  position and starting from /the following/ input.  If there's nothing left for our
+  current task to complete, we can look back at the items which were waiting on us and
+  advance /their/ dot position starting from the /current/ input.
+
+  These types of task spawning routines are each respectively called a "predictor",
+  "scanner" and "completor".  It's worth taking a second to make sure you understand why
+  each of these actually does advance our cause.
+
+  The algorithm has us perform each of these in sequence repeatedly until nothing more can
+  be done with the current input.  For this to work, we need the output to represent
+  "sets" of Earley items.  That is, a second application of a predictor shouldn't produce
+  more items identical to those produced by the first application.  When we have sets
+  we should expect the list of items to settle eventually.  These sets for each stage of
+  the input are sometimes called "state sets".
+
+  Walking through the example above and applying the predictor to our initial items we get
+  the following:
+
+  : e:.s/0    e:.p/0    s:.p/0    s:.s+p/0  p:.f/0    p:.p*f/0
+
+  All of the new productions have their dot positions equal to 0 because they were just
+  created and have their origins equal to 0 because that is the stage in which they were
+  spawned.
+
+  None of the items has a terminal at its current dot position, nor is any at the end, and so
+  the scanner and completor applications produce no new items, but cycling back to the
+  predictor, we see that all the items point to non-terminals, but only one points to a /new/
+  non-terminal, namely ~p:.f/0~, and so we produce two more items.
+
+  : e:.s/0    e:.p/0    s:.p/0    s:.s+p/0  p:.f/0    p:.p*f/0  f:.(s)/0  f:.N/0
+
+  Now these new items do point to terminals, but only the last points to the current
+  input, so we produce a new item for the /next/ stage ready to consume /more/ input.
+  Namely, ~f:N./0~.  At this point none of the items at this stage are completed, nor
+  point to new non-terminals, nor produce any new items using the current input and so
+  this stage has stabilized.  We then move on to the next stage which starts out as the
+  following:
+
+  : 1         +         f:N./0
+
+  I.e. we're at stage 1, the current input is now ~+~ and we have a single Earley item to
+  consider.  Let's walk through just a couple more items so we can see an example of a
+  completor spawning items.  This single item has its dot position at the end and so neither
+  the predictor nor scanner apply, but the completor /does/ apply.  We need to look back
+  at the stage that began this subtask, i.e. stage 0, and find all items that were waiting
+  on the completed non-terminal.  Here there is only one: ~p:.f/0~.  Because we've
+  completed ~f~ we can advance the dot position to ~p:f./0~ and we add it to the current
+  stage because now it's ready to wait on more input.  Notice that the origin of this
+  remains ~0~.  This is not a new subtask, but rather the advancement of an earlier task.
+  (No pun intended.)
+
+  At this point feel free to work through the rest of the table to verify that at stage 9
+  we do end up with a stable set of items having consumed all of the input.  Further
+  notice that there is an item whose origin is 0 and is a production of our start symbol
+  which is completed.  Namely, ~e:s./0~.  This means that this was a successful parse.
+
+  So somehow through this process we've made it to the end, but in our wake we've left a
+  forest of Earley items.  (Apologies for the mixed metaphor.)  We'd like to figure out
+  /how/ we made it to the end.  In order to do that we'll need to keep track of /how/ we
+  advanced by saving "back pointers".
+
+  It turns out that it suffices to track how both the scanner and completor created items.
+  That's because we're really only interested in how dot positions are advanced.  Starting
+  with ~e:s./0~ at stage 9, we ask "how did we get here?".  Moving the dot position back
+  we see that we advanced past a non-terminal which must mean that there is some item at
+  the same stage which completes a production of that non-terminal.  If we look at
+  the first item at stage 1 above and ask how we ended up there, we see that we advanced
+  some item from the previous stage because it was waiting on the input at that stage.
+
+  So scanners advance items when an item's current dot position matches the current input
+  and completors advance items from an earlier stage because some item in the current
+  stage represents the completion of the non-terminal at the dot position of the former
+  item.  That is, for scanners we keep track of which item was advanced to which item and
+  for completors we track in addition which item caused that advancement.
+
+  By back-tracking each of the advancements we can follow the history of an item, and by
+  recursively tracking the history of the items which caused those advancements we can map
+  out the parsing of each subexpression.
+
+  Okay, maybe more detail than I thought I'd get through but this is the basic lay of the
+  land.  Let's move on to how we code this up.
+
+** The code
+  Moving dots through a string representation of a production is a great way to visualize
+  how this process works, but for coding this up we really only need to track the position
+  of the dot through a given production; thus we'll represent an Earley item with a triple:
+
+  : (production number; dot position; origin state set)
+
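+  For example, assuming the productions are numbered from 0 in the order they appear in the
+  listings above, the initial item ~e:.s/0~ would be represented by the triple ~(0;0;0)~ and
+  ~p:.f/0~ by ~(4;0;0)~.
+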
+  To this basic triple we'll add two derived values: a unique integer identifier and the
+  token at the current dot position, because we'll be using the latter for checking whether
+  that token is a terminal or not and for looking up whether it matches a completed
+  item.
+
+  Each stage will be represented by an array of such five-tuples and the full table will
+  be a collection of such arrays.  As is common in array programming, instead of keeping
+  arrays of (mixed) tuples we'll actually be keeping five-tuples of (heterogeneous) arrays.
+
+  In perhaps an excessive flourish we keep all the productions in a single string and note
+  which portions of that string represent which productions.  We detect reaching the end
+  of a production by testing if the start of the production plus our dot position point to
+  the next production.  Actually, since a 0 dot position is represented as being before
+  any part of the production is completed, we start out pointing at the non-terminal being
+  produced and then use the dot position to advance through the actual production tokens.
+
+  Our parsing state comprises a set of base data, including the back pointers, and
+  the table we're producing.  The former is threaded through the program as a dictionary.
+
+  : data:{R:,/',/+'((*:;1_"|"\"|",)@'":"\)'" "\x
+  :      `p`t`i`bp!(+\0,#'R;,/R;y,"\0";(,0N 0N)!,())}
+
+  Here ~`p~ is the list of pointers into the string of all the productions, which is kept
+  as the list of tokens ~`t~.  We also save the input at ~`i~ and the back pointers at ~`bp~.
+
+  To produce a unique id we note that there are a finite number of productions and that
+  neither the dot position nor the origin can be bigger than the length of the input plus one.
+  This allows us to see the original triple as a mixed base number.
+
+  For the current token we use the calculation from the production index and dot position
+  mentioned above and replace anything at the end position with a blank character.
+
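+  So, for example, the current token of ~p:.f/0~ is ~"f"~, while for the completed item
+  ~f:N./0~ it is a blank ~" "~.
+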
+  Here are our id and current token functions:
+
+  : id:{(1+#'x`p`i`i)/y}
+  : dot:{@[x[`t]@n;&x[`p;1+y[0]]=n:1+y[1]+x[`p;y[0]];:;" "]}
+
+  We use the convention that the start token is the non-terminal of the first production
+  and look for all productions sharing that start token.  Our initial data starts with
+  each of these at dot position 0 and stage 0.
+
+  : s:++(&=/1*:\x[`t]x`p;0;0)
+
+  The core of the algorithm is a fold over each of the step types wrapped in two nested
+  fixed point computations.  The outer fixed point iterates over a triple of the following
+  form:
+
+  : (program state; table; current stage index)
+
+  and bumps the current stage index and adds an empty state set to our list of state sets
+  as long as the last state set is not empty; if it is empty, it just returns its input unchanged.
+
+  The inner fixed point keeps the stage index fixed and iterates over the pair of program
+  state and table and calls the fold over each of the step types in turn until it
+  stabilizes.
+
+  Each step type is wrapped in a function which prepares the input as a set of relevant
+  items, namely the program state, the current state index, the current tokens of all
+  items at the current state and the full table.  This wrapper also takes care of ensuring
+  we don't have duplicates by delegating the table update to another function which both
+  checks the ids of the proposed items against the current items and appends the derived
+  data.
+
+  : stp:{add[y 0;y 1]@z[y 0;x;y[1;x;4];y 1]}
+  : add:{n:(z[1],(i;dot[x]@z[1]))@\:&((!#i)=i?i)&w:^(y[z 0;3])?i:id[x]@z[1]
+  :      $[~#*|nxt:(+(z[0];i); z[2]);;x[`bp]:{@[x;y;?,;,z]}/[x`bp].nxt]
+  :      (x;@[y;z 0;,';n])}
+
+  The predictor is very straightforward.  We simply look for anything in the current state
+  set whose current token is on the left-hand side of a production and create a new item with
+  the corresponding production rule and dot position set to zero and origin equal to the
+  current state.  We have no need for back pointers in this case.
+
+  The scanner might be even simpler since we're comparing the current tokens against a
+  single token.  In this case we create a new item which is effectively a duplicate of the
+  original with its dot position bumped up by one.  This is added to /the succeeding/
+  state.  We also take note of the items that were copied and store them as back pointers
+  for the corresponding new items.
+
+  The most complicated of the three is the completor because we have to do a more
+  sophisticated lookup.  First we have to find the completed items.  Because of our
+  implementation of the current token function, these are simply the items whose current
+  token is a blank character.  Next, for each completed item we need to look up /that
+  item's/ origin state set and compare the completed non-terminal against the current
+  tokens of that origin state set.  Since we're using an array language we can do this for
+  all completed items simultaneously.  One wrinkle is that the items we find will be in
+  various state sets.  We have to take care to note which came from which both to produce
+  new items and to store back pointers.  We create the new items as a fold over pairs of
+  state set indices and indices into that state set, starting with an empty set and
+  repeatedly adding copies of the original indicated item with their dot position bumped
+  up by one.  For back pointers, we take note of both which item is copied and which item
+  prompted the copy.
+
+  Here are each of those:
+
+  : pred:{[d;s;c;t]w:&(-1_d[`t]d[`p])=/:?c;(s;++(*|w;0;s+&#*w);!0)}
+  : scan:{[d;s;c;t]w:&d[`i;s]=/:c;(s+1;@[3#t[s]@\:w;1;+;1];,'s,/:w)}
+  : comp:{[d;s;c;t]$[~#c:&^c;:(s;empty;!0);]
+  :       f:(f@;::)@'w:&(d[`t;d[`p;t[s;0;c]]])=t[;4]f:t[s;2;c]
+  :       (s;{[x;y;z;w]y,'@[3#x[z;;w];1;+;1]}[t]/[empty].f;+(+f;s,/:c@w 0))}
+
+  And that's basically it for the recognizer.  All that's left is to use the back pointers
+  to assemble the parse tree should we have a complete parse.
+

+ 21 - 0
ngnk-libs/png/README.org

@@ -0,0 +1,21 @@
+* PNG tools
+  First swipe.  Lots of clean up to come.
+
+  ngn/k is not really built for doing decompression.  (At least not with my level of understanding
+  of compression.)  That is to say, decoding bits is very inefficient.  Currently this shells out to
+  ~zlib_decompress~ which I have on my iMac and works well enough for now.  You can replace this
+  with anything that decompresses a zlib stream to stdout.
+
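+  If you don't have such a helper handy, a minimal sketch of a replacement (this mirrors the
+  ~decmp~ shell-out convention used in png.k and assumes a ~python3~ on your PATH; it is not
+  part of the library itself) could be:
+
+  : decmp:{`x(("python3";"-c";"import sys,zlib;sys.stdout.buffer.write(zlib.decompress(sys.stdin.buffer.read()))");x)}
+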
+  For output, we actually avoid compression by using the "no compression" version of the [[https://datatracker.ietf.org/doc/html/rfc1950][deflate RFC]].
+
+  We also compute the [[http://www.libpng.org/pub/png/spec/iso/index-object.html#D-CRCAppendix][crc check]] though this is also a bitwise computation.  Really this should all
+  just be handed off to zlib, but it was fun to try and works well enough if your image size is
+  pretty small.
+
+  The example takes the [[https://github.com/mthom/scryer-prolog/tree/master/logo][scryer logo]] and turns it into a black and white image by replacing pixels
+  with bits indicating if the pixel is the background color or not.  Then we use this data to
+  generate a no-filtering/no-compression png by hand.
+
+** Goals of this project
+  Nothing major.  Mostly I just wanted to get at some image data to play around with some
+  algorithms.  This might change.

+ 50 - 0
ngnk-libs/png/example.k

@@ -0,0 +1,50 @@
+\l png.k
+
+/ [[https://github.com/mthom/scryer-prolog/tree/master/logo]]
+png:1:fn:"scryer.png"
+$[~png.ispng[png];`err"Not a png file: ", fn;]
+
+(w;h):*png.ihdr@png
+idat:png.decmp@png.idat@png
+data:png.unfilt[8](h,0N)#idat
+
+/ mark each pixel as either bg color or not
+/ extend each row to have length a multiple of 8
+bw:((0x5757d3d3dede~/:-2_'0N 8#)'data),\:0
+/ collect this data as bytes
+/ add a byte to the front to indicate no filtering
+bwdata:,/(`c$0),/:(`c$2/+0N 8#)'bw
+
+/ build up a b/w png
+png0:png.PNGHDR
+/ same dimensions, but now bit-depth 1, color-type 0
+png0,:png.mkchnk["IHDR";`c$,/((4#256)\'w,h),1 0 0 0 0]
+/ "compress" at level 0
+png0,:png.mkchnk["IDAT";png.nocmp@bwdata]
+png0,:png.mkchnk["IEND";!0]
+"/tmp/bw.png" 1:png0
+
+/ bottom image
+/ [[https://www.w3.org/Graphics/PNG/Inline-img.html]]
+png:1:fn:"test.png"
+$[~png.ispng[png];`err"Not a png file: ",fn;]
+
+(w;h):*png.ihdr@png
+idat:png.decmp@png.idat@png
+
+/ Pull out the interlaced images
+c:png.iszs[w;h]
+
+PIXELWIDTH:3
+ii:(-1_+\0,(*|c)+PIXELWIDTH**/c)_idat
+
+/ Unfilter each separately
+rr:png.unfilt[PIXELWIDTH]'((*|c),\:0N)#'ii
+
+/ interlace back into a single image with no filtering
+dd:,/(`c$0),/:,/'(0N,w)#((0N,PIXELWIDTH)#,/,/'rr)@<<,/w#'h#png.A7
+png0:png.PNGHDR
+png0,:png.mkchnk["IHDR";`c$,/((4#256)\'w,h),8 2 0 0 0]
+png0,:png.mkchnk["IDAT";png.cmp@dd]
+png0,:png.mkchnk["IEND";!0]
+"/tmp/testing.png" 1:png0

+ 58 - 0
ngnk-libs/png/png.k

@@ -0,0 +1,58 @@
+\d png
+PNGHDR:0x89504e470d0a1a0a
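+/ Adam7 interlace pass pattern: pass index (0-6) for each pixel of an 8x8 block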
+A7:(0 5 3 5 1 5 3 5
+    6 6 6 6 6 6 6 6
+    4 5 4 5 4 5 4 5
+    6 6 6 6 6 6 6 6
+    2 5 3 5 2 5 3 5
+    6 6 6 6 6 6 6 6
+    4 5 4 5 4 5 4 5
+    6 6 6 6 6 6 6 6)
+
+/ shell out for bit-oriented algos
+decmp:{`x(("./png.py";"--decompress");x)}
+cmp:{`x(("./png.py";"--compress");x)}
+crc:{`c$256\.`x(("./png.py";"--crc");x)}
+
+/ Basic chunk deciphering
+ispng:{PNGHDR~8#x}
+chunk:{(ln;nm):2 4#@[x;y+!8];ln:256/+/1(256*0>)\ln;(ln;nm)}
+chnks:{+1_*+(~"IEND"~*|*:){(ln;nm):chunk[x;*|y];((ln;nm);ln+12+*|y)}[x]\(();8)}
+data:{(ls;ns):chnks x;+(ns;+/(8+-1_+\8,12+ls;!'ls))}
+idat:{x@,/(*|)'("IDAT"~/:*:')#data@x}
+ihdr:{(w;h;f):256!0 4 8_x@16+!13;(256/+(w;h);f)}
+
+/ filter routines
+paeth:{(x;y;z)@*<{x|-x}((x+y)-z)-/:(x;y;z)}
+
+unnone:{z}
+unsub:{,/+\(0N,x)#z}
+unup:{y+z}
+unavg:{x_{[x;a;b;c]a,256!c+-2!b+a@(#a)-x}[x]/[x#0;y;z]}
+unpaeth:{x_{[x;a;b;c;d];a,256!d+paeth.(a@(#a)-x;b;c)}[x]/[x#0;y;(-x)_(x#0),y;z]}
+
+unfilt:{`c${(t;r):0 1_z;256!(*(unnone;unsub;unup;unavg;unpaeth)t)[x;y;r]}[x]\[(-1+#*y)#0;256!y]}
+
+/ sizes of interleaved images
+iszs0:{(-8!({x,y}/#''='x)*((!7)!7-(|/,/&')'(!7)=\:x)+y)}
+iszs:{[w;h](iszs0.'+((A7;+A7);(w;h)))@\:!7}
+
+/ Manual "compression".  (Just use tools above.)
+(CMF;FLG):120 1
+/ No compression [[https://datatracker.ietf.org/doc/html/rfc1950]]
+nozip:{`c$CMF,FLG,,/,'/|1(1_|128,#/|0,#:)\,'/|1(,'/2/'''0N 8#/:/:1~:\+(16#2)\#:')\0N 65535#x}
+/ [[https://datatracker.ietf.org/doc/html/rfc1950#section-9]]
+ad32:{b:+/a:(1+\256!x);`c$2/'0N 8#,/(16#2)\'65521!/:(b;*|a)}
+nocmp:{,/(nozip;ad32)@\:x}
+
+/ Manual crc32 algo      (Just use tools above.)
+/ [[http://www.libpng.org/pub/png/spec/iso/index-object.html#D-CRCAppendix]]
+P:,/+(8#2)\256!0xedb88320
+crctbl:(8{$[*|x;~P=-1_0,x;-1_0,x]}/(32#2)\)'!256
+/crc:{`c$256\2/~{~crctbl[2/-8#~x=(32#2)\y]=-8_(8#0),x}/[32#1;256!x]}
+mkchnk:{(`c$(4#256)\#y),,/1 crc\x,y}
+
+\d .
+
+/
+SPEC: https://www.w3.org/TR/png/

+ 48 - 0
ngnk-libs/png/png.py

@@ -0,0 +1,48 @@
+#!/opt/local/bin/python
+import zlib
+import sys
+import argparse
+
+
+def decompress():
+    data = sys.stdin.buffer.read()
+    decom = zlib.decompress(data)
+    sys.stdout.buffer.write(decom)
+
+
+def compress():
+    data = sys.stdin.buffer.read()
+    com = zlib.compress(data)
+    sys.stdout.buffer.write(com)
+
+
+def crc():
+    data = sys.stdin.buffer.read()
+    ret = zlib.crc32(data)
+    sys.stdout.write(f'{ret}')
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        prog='png-helper',
+        description='Tools for working with png files in ngn/k')
+    parser.add_argument('-d', '--decompress', action='store_true')
+    parser.add_argument('-c', '--compress', action='store_true')
+    parser.add_argument('-r', '--crc', action='store_true')
+
+    args = parser.parse_args()
+    if not sum([args.decompress, args.compress, args.crc]) == 1:
+        print("Must supply exactly one of --decompress, --compress or --crc")
+        parser.print_help()
+        return
+
+    if args.decompress:
+        decompress()
+    elif args.compress:
+        compress()
+    else:
+        crc()
+
+
+if __name__ == '__main__':
+    main()

+ 26 - 0
ngnk-libs/regex/example.k

@@ -0,0 +1,26 @@
+\d re
+\l re.k
+\d .
+para:`0:
+
+para"regex: ", rex:"[\\+-]?[0-9]+(\\.[0-9]+)?([Ee][\\+-]?[0-9]+)?"
+rx:re.match[re.cmp@rex]
+{para"match: ",x;`0:$rx x}'("1";"-3.1415e0";"-.1415e0";"-3.1415e";"dog");
+
+cases:("abc";"xxxabbc";"abbbcyyy";"xxxabbbbcyyy")
+testcases:{para"search ",z;`0:$x z;para"match ",z;`0:$y z}
+
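+/ scmp compiles a pattern for searching within a string (anchors apply); cmp compiles it to match the whole string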
+test:{para"---";para"regex: ", x;testcases[re.match[re.scmp@x];re.match[re.cmp@x]]'y;}
+
+test["ab+c";cases]
+test["^ab+c";cases]
+test["ab+c$";cases]
+test["^ab+c$";cases]
+
+/
+/  [[https://github.com/gitonthescene/ngnk-libs/tree/master/trees]]
+\d tree
+\l trees.k
+\d .
+(e;t):re.prs rex
+tree.shw[`k'e;t]

+ 80 - 0
ngnk-libs/regex/re.k

@@ -0,0 +1,80 @@
+/ Inspired by [[http://nsl.com/papers/re.htm]] by Raul Miller
+/ Expanded to J at [[https://code.jsoftware.com/wiki/Essays/RegularExpressions/NondeterministicFiniteAutomata]]
+
+pr:{r:@[;;|;]\[(!0)@&1+|/x;x;!#x];(!#x)^'r@'x-1}
+
+exp:{@[@/r;&=':*|r:1(&1+x<"("=)\y/,'"()";:;"|"]}
+msk:{-1_0,<\"\\"=x}
+dp:{d:(-x<"|"=y)+2*(-*t)++\-/t:x</:"()"=\:y
+    $[1<|/dd:(-*t)++\-/t:x</:"[]"=\:y;`err@"bad character class"
+      |/0,~^"()"?y@&x<dd;`err@"bad character class"
+      |/dd<0;`err@"bad character class";]
+    d+dd}
+
+cls:{$[3>#x;x
+       |/0>r:-/i:x@-1 0++1_,':&~|':<\"-"=x," ";`err@"bad class"
+       ,/`c$i[1]+!'1+r]}
+
+prs:{ t:dp[m:msk e] e:exp[0,msk[x],0;x]
+      (e;t;m):(e;t;m)@\:&~|/m</:"])"=\:e
+      (e;t;m):(e;t;m)@\:&~<\"\\"~/:e
+      t:pr t
+      e:@[e;?(t,&|/"*+?.|^$"=\:e)^&m;`i$]
+      t:@[t;;:;].,'/{|y@1(-1+)\&|/x[y]~\:/:"?*+"+0}[e]'=t
+      (e;t)}
+
+cmp0:{[e;t] d:(~e[t]~\:"["+0)&|/e~\:/:"^$"+0
+      (e;t):(-+/d)_/:(::;(<g)@)@'(e;t)@\:g:<d
+      s:{$[`c~_@n:x@z;step@n
+      n="?"+0;maybe@o[x;y]@*y[z]
+      n="*"+0;rep@o[x;y]@*y[z]
+      n="+"+0;rep1@o[x;y]@*y[z]
+      n~"("+0;or@o[x;y]'y[z]
+      n~"|"+0;seq@o[x;y]'y[z]
+      n~"["+0;$["^"=*c:x@y[z];nstep@,cls@1_c;step@,cls@c]
+      n~"."+0;nstep@,!0
+      x@z]}[e;(=0N,1_t),(,0N)!,!0]@0
+      fix s}
+
+cmp:{cmp0.(prs x)}
+
+anc:{[n;t]s:&n~\:"|"+0;e:s^'*'|'&'s=\:t
+     +(((s;s);(e;s)))@\:'&'~|/'n[(s+1;e)]~\:/:'("(^";"$$")+0}
+
+scmp:{(n;t):prs x
+      (n;t):(::;(<g)@)@'(n;t)@\:g:{y,,/o[x]'x y}[(=0N,1_t),(,0N)!,!0]0
+      a:anc[n;t]
+      g:@[m;0 1+/:,/(::;(#m)-2+)@'(1|:\(m:&1+@[&#t;a[0];+;2]))?'a[0];:;0N 2#(#t)+!2*#,/a[0]]
+      n,:(2*#,/a[0])#0+"*."
+      t,:,/+(,/a[1];(#t)+2*!#,/a[0])
+      cmp0.(::;(<g)@)@'(n;t)@\:g}
+
+non:256
+nul:(non+1)#,!0
+final: {(#x)-1}
+
+branch:{,@[nul;non;:;,/x]}
+step:{branch[,1],(, @[nul;,/x;:;2]),,nul}
+nstep:{branch[,1],(, @[nul;(!non)^,/x;:;2]),,nul}
+
+maybe:{branch[1,#x],1+x}
+seq:{.[,/(+\0,-1+#'x)+(-1_'x),,,nul;0,non;,;(~#x)#1]}
+rep1:{x[,final x;non],:,0,#x; x,,nul}
+rep:maybe@rep1@
+or:{branch[m],(,/+/(*|n;x)*1~:\(m+c-1)=x+:m:-1_n:+\1,c:#'x),,nul}
+re:seq@step'
+
+comb: {?y,,//x@y}
+tc: {comb[x;]'/x}
+fix: {comb[tc x[;non];]''x}
+
+next: {?,/x[0][y;x[1]z]}
+match:{n:next[(x;"",y)]
+       final[x]=|/*(&/#:'){(a;b):y;(x[a;*b];1_b)}[n]/(x[0;non];!#y)}
+
+nfa2dfa:{c:1((@/1<:\?:)','/x@)\;r:{y,,/(x'(~#:')_^[;*+y]@?*|:)'y}[c]/,c x[0;re.non]
+         (&|/'~^(,re.final[x])?*r;0^?/r:+r)}
+
+nfacmp:nfa2dfa@cmp@
+nfascmp:nfa2dfa@scmp@
+nfamatch:{~^x[0]?0{x[y;z]}[x[1]]/y}

+ 417 - 0
ngnk-libs/trees/README.org

@@ -0,0 +1,417 @@
+* Some tree related routines
+
+* Ramblings
+  I hope to write up something more easily digestible but for now just doing a bit of a brain dump.
+
+  Since K has nested vectors (lists) you could always represent trees in a Lisp-y way.  I.e.
+  ~(root; ((leftchld; leaf); (rightchld; leaf)))~ where a node is ~(value; (children))~ and a leaf
+  is somehow distinguishable from a non-leaf by being, say, an atom.
+
+  There are some slightly annoying aspects to this, though.  For one, you need a convention to
+  distinguish leaves from interior nodes.  For another, to access interior nodes you need to descend
+  the tree.  Sure, nine times out of ten when you're working with trees you're having to walk the
+  tree, but still.  Lastly, nested lists mean pointers of pointers which just doesn't feel very
+  array-like.
+
+  What can be done?  Well there are [[https://github.com/JohnEarnest/ok/blob/gh-pages/docs/Trees.md][other representations of trees]], but the one I prefer is what I
+  like to call an [[http://nsl.com/k/tableaux/trees.k][Apter tree]].  You can find examples on [[http://nsl.com][nsl.com]] or this article from
+  [[http://archive.vector.org.uk/art10500340][Vector magazine]], but depending on your appetite this may not feel like enough of a description
+  of how they work.
+
+  [DISCLAIMER: I haven't done much(any?) research into the history of this data structure but I've
+  also not run into anyone else who has.]
+
+** Apter trees
+   The basic idea is to separate the /structure/ of a tree from its /contents/.  By "structure" we mean
+   which nodes are connected to which other nodes.  While it might seem natural to do this with a
+   dictionary of node -> list of children this brings us back to nested lists.  Here's where a neat
+   observation comes in.
+
+   : While each node can have multiple children, each node (except the root) has only /one/ parent.
+
+   Thus if we have a tree such as ~(15;(12 9;18 2))~, then the parent of the leaf ~9~ is ~12~ and the parent
+   of the ~12~ is ~15~.  Note that even here there's a bit of awkwardness.  Is ~12~ a node or is it a value?
+   Similarly for ~9~.
+
+   Let's give each node an index using a depth first preorder of the tree.  So we have 5 nodes whose
+   indices are ~0 1 2 3 4~ and whose values are ~15 12 9 18 2~.  Now we can say that the parent of node
+   ~2~ is ~1~ and the parent of node ~1~ is ~0~.  So using ~0N~ for the parent of the root (for now),
+   the parent of each node is ~0N 0 1 0 3~.  Now we have a /"parent vector"/ and a /"values vector"/.
+   If nothing else this clears up the confusion of which we're talking about.
+
+   Note that there's nothing really special about using depth first preorder.  If we shuffled the indices,
+   we'd simply need to shuffle the indices of each parent to point to its new location.  What /is/ important
+   is that the parent vector and the values vector be in the /same order/.  Any ordering would faithfully
+   represent the tree, but conventions are useful.
+
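+   For instance (just to illustrate the point), if we listed the same five nodes in reverse
+   order, the values vector would become ~2 18 9 12 15~ and the parent vector ~1 4 3 4 0N~:
+   a different bookkeeping of exactly the same tree.
+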
+   Let's give these names ~p:0N 0 1 0 3~ and ~n:15 12 9 18 2~.  So the value of our node at index ~2~ is
+   ~n[2]~9~ and its parent is at index ~p[2]~1~.  The value of the parent is ~n[p[2]]~12~ and the parent
+   of the parent at index ~2~ is ~p[p[2]]~0~.  But we're using an array language!  We can do this across
+   all the indices simultaneously. ~p[!#p]~0N 0 1 0 3~ and ~p[p[!#p]]~0N 0N 0 0N 0~.  This last could also
+   be written ~2(p@)/!#p~.
+
+   We could also look at the fixed point scan which gives a matrix whose columns give paths from each index
+   up to the /parent/ of the root, which is currently marked with ~0N~
+
+    :  (p@)\!#p
+    : (0 1 2 3 4
+    :  0N 0 1 0 3
+    :  0N 0N 0 0N 0
+    :  0N 0N 0N 0N 0N)
+
+   While this works well enough for many circumstances, it's more useful to stop at the root.  To make this
+   happen we make the root /self-parenting/.  I.e. we define our parent vector to be ~p:0 0 1 0 3~.
+
+    :  (p@)\!#p
+    : (0 1 2 3 4
+    :  0 0 1 0 3
+    :  0 0 0 0 0)
+
+*** Depth vectors
+    Above we basically eyeballed the parent vector.  How do we generate a parent vector more generally?
+    This of course depends on the information we're given, but often it's easiest to first shoot for a
+    slightly different representation.  That is, a depth vector.
+
+    The depth vector simply gives, for each node, the length of the path from that node up to the root.
+
+    I.e.
+
+     :  +/1&(p@)\!#p:0^p
+     : 0 1 2 1 2
+
+    Generally it's easier to generate a depth vector than to directly generate a parent vector.  For
+    instance ~d:{$[`i=@y;x;,/x,o[x+1]'y 1]}[0;(15;(12 9;18 2))]~ or ~+\-/"()"=\:"(a(bc(d)ef))"~.  (We'll
+    talk more about the second one in a bit.)
+
+    Okay, let's assume depth vectors are easy to come by.  How does this help us create a parent vector?
+    Here's where conventions help out.  If our depth vector is depth first preorder then we know that our
+    parent is at the index /"closest to the left at depth one less than ours"/.  This is calculable!  There
+    are different approaches to doing this calculation, but let's start with the easiest.
+
+    :  {|/0,&x=-1+*|x}',\d
+    : 0 0 1 0 3
+
+   Okay so now let's say we have a parent vector and a node vector, what can we do with them?  Well, we
+   can do things like ask which index is the parent of the node with value ~9~?  We can simply do ~p[n?9]~.
+   Notice that we didn't have to walk the tree to find this!!
+
+   But what if we did want to walk the tree?  That's still an option.  We just need to find the
+   children of each node, which we can get by applying "group" to the parent vector: ~=p~.  Here,
+   having the root be self-parenting can be a bit of a pain since we want to recurse to proper
+   children and not enter an infinite loop.
+   With simple trees like this we can just do ~=0N,1_p~ to effectively make our root a child of some
+   /other node/ we'll never refer to.  There's also another little trick to get a useful prototype.
+   Let's use ~chld:(=0N,1_p),(,0N)!,!0~ as our map from parents to children.  Then ~chld[0]~1 3~ and
+   ~chld[2]~ is ~!0~.
+
+   Now we can do the following to descend the tree:
+
+   :  chld:(=0N,1_p),(,0N)!,!0
+   :  {(x@z;o[x;y]'y@z)}[n;chld]0
+   : (15
+   :  ((12;,(9;()));(18;,(2;()))))
+
+   (Here leaves have empty children, but this is simply an example of what's possible.)
+
+
+*** Sure, but why?
+    Fair question.  Well posed.  My motivation was mostly to see what's possible with a data structure
+    I wasn't familiar with, but if you keep your eye out for it similar structures pop up in other
+    domains.  The motivation is usually that by using vectors you get the benefit of data locality.
+    I.e. you're likely to have related items accessed together hot in the cache.  On the negative side,
+    as is common with vector structures, modifying them means moving them in bulk to ensure that they're
+    in contiguous memory.  Thus they really shine when the size of the structure is relatively stable.
+
+    Weighing the benefits is not unlike comparing linked lists to arrays.  Each node in a linked list
+    might be stable in memory at the expense of data locality whereas you may need to move arrays around
+    if you need to ensure data locality.
+
+    Apter trees may not be right for your application, but in my experience they have their good points.
+    To my mind separating the structure from the content reduces some mental load.  I find it easier to
+    picture the kind of manipulations that are possible and how to accomplish them.
+
+    If you're still curious at this point, read on...
+
+*** Applications
+    Obviously, Apter trees being trees means they're useful wherever trees are useful.  But let's
+    explore a few examples, starting with the string parsing above.
+
+    This algorithm for finding the [[https://dl.acm.org/doi/pdf/10.1145/800136.804466][depth of parenthesization]] is a classic.  I first read about it in a
+    quote from Alan Perlis speaking about how he was amazed that APL could do something so useful with
+    so few characters.  Let's have a look at it again.
+
+    :  +\-/"()"=\:"(a(bc(d)ef))"
+    : 1 1 2 2 2 3 3 2 2 2 1 0
+
+    It may be easier to match each character with it's depth:
+
+    :  +(txt;+\-/"()"=\:txt:"(a(bc(d)ef))")
+    : (("(";1)
+    :  ("a";1)
+    :  ("(";2)
+    :  ("b";2)
+    :  ("c";2)
+    :  ("(";3)
+    :  ("d";3)
+    :  (")";2)
+    :  ("e";2)
+    :  ("f";2)
+    :  (")";1)
+    :  (")";0))
+
+    One thing to notice is that each matching close parenthesis ends up one level shallower than its open parenthesis.
+    Another thing to notice is that each opening parenthesis is at the same depth as its contents.
+    This is more than likely not what you want, but is easily fixable.
+
+    :  +(txt;(-*ps)++\-/ps:"()"=\:txt:"(a(bc(d)ef))")
+    : (("(";0)
+    :  ("a";1)
+    :  ("(";1)
+    :  ("b";2)
+    :  ("c";2)
+    :  ("(";2)
+    :  ("d";3)
+    :  (")";2)
+    :  ("e";2)
+    :  ("f";2)
+    :  (")";1)
+    :  (")";0))
+
+    For simple compilers and parsers, this basic idea forms the baseline of the parsing process.
+    As above, the step from a depth vector to a parent vector and thus to a tree structure is not
+    very far.
+
+    Doing this again with something more recognizable:
+
+    :  +(txt;d:(-*ps)++\-/ps:"()"=\:txt:"(3*(4+9))-7")
+    : (("(";0)
+    :  ("3";1)
+    :  ("*";1)
+    :  ("(";1)
+    :  ("4";2)
+    :  ("+";2)
+    :  ("9";2)
+    :  (")";1)
+    :  (")";0)
+    :  ("-";0)
+    :  ("7";0))
+    :  d
+    : 0 1 1 1 2 2 2 1 0 0 0
+    :  {|/0,&x=-1+*|x}',\d
+    : 0 0 0 0 3 3 3 0 0 0 0
+
+    Notice that the open parentheses become parent (i.e. interior) nodes and that everything else
+    is a leaf.  Thinking about it a little bit more, we only really needed the close parentheses
+    as a marker of where the corresponding open parentheses end.  They act sort of like how null
+    characters act at the end of C strings.  Now that we have that structure in the parent vector,
+    we no longer need these nodes.
+
+    In general removing items from the parent vector could throw off the indices in that vector.
+    Consider ~(1+3)*(4+6)~.  There is a trick for doing this, but more straightforward here is
+    to edit the depth vector.  Because closed parentheses don't interrupt the "closest index to the
+    left at depth one less than ours", removing them doesn't affect the parent vector we'll
+    generate from it.
+
+    :   +(txt;d):(txt;d:(-*ps)++\-/ps)@\:&~*|ps:"()"=\:txt:"(3*(4+9))-7"
+    : (("(";0)
+    :  ("3";1)
+    :  ("*";1)
+    :  ("(";1)
+    :  ("4";2)
+    :  ("+";2)
+    :  ("9";2)
+    :  ("-";0)
+    :  ("7";0))
+
+    It's this sort of thinking about trees that I enjoy about Apter trees.  It's as much like
+    painting as it is like programming.  Note that here we're considering ~txt~ to be the values
+    vector and we make our adjustments to both the depth vector and the values vector in parallel.
+
+    Another thing to notice here is that we have several nodes at depth ~0~.  We probably want a
+    single root, though.  We could fix this by adjusting the original text and adding a set of
+    parentheses around the whole thing, or we can edit this postprocessed tree.
+
+    :  +(txt;d):("(",txt;0,1+d)
+    : (("(";0)
+    :  ("(";1)
+    :  ("3";2)
+    :  ("*";2)
+    :  ("(";2)
+    :  ("4";3)
+    :  ("+";3)
+    :  ("9";3)
+    :  ("-";1)
+    :  ("7";1))
+
+    You might be asking yourself at this point "where is this going?"  The answer is "nowhere!".
+    We're just playing around to see the kind of thinking that goes into working with these
+    structures.
+
+    But as long as we're playing around with something familiar, we probably want these (infix)
+    operators to be parents of the things they're operating on.  We could try to manipulate the
+    depth vector, but if we keep the order the same, we'll not be in DFS preorder.  Let's instead
+    make a parent vector and rewire it.
+
+    :  :p:{|/0,&x=-1+*|x}',\d
+    : 0 0 1 1 1 4 4 4 0 0
+    :  txt@=p
+    : !/+((0;"((-7")
+    :     (1;"3*(")
+    :     (4;"4+9"))
+
+    Currently, each infix operator is a sibling of the element to the left and the right.  A
+	sibling which is an open parenthesis represents the subexpression (i.e. subtree).  Instead,
+	we'd like to have each node to the left and the right of an infix operator have the infix
+	operator as its parent.  Note that these are the indices to the left and the right /among its
+	siblings/.  This means we'll have to do this per grouping.
+
+    :  =p
+    : !/+((0;0 1 8 9)
+    :     (1;2 3 4)
+    :     (4;5 6 7))
+    :  {y@(0;-1 1)+/:&~^"+*-%"?x@y}[txt]'.=p
+    : !/+((0;,(8;1 9))
+    :     (1;,(3;2 4))
+    :     (4;,(6;5 7)))
+    :  |+,/{y@(0;-1 1)+/:&~^"+*-%"?x@y}[txt]'.=p
+    : ((1 9;2 4;5 7)
+    :  8 3 6)
+    :  :p:@[p;;:;].|+,/{y@(0;-1 1)+/:&~^"+*-%"?x@y}[txt]'.=p
+    : 0 8 3 1 3 6 4 6 0 8
+
+    This isn't great for visualizing.  Let's use something like the depth from parent calculation
+    above.
+
+    :  `0:(" ",txt)@(1+!#txt)*/:-1+2*~=':(p@)\!#p
+    : ((3*(4+9-7
+    :  (3*(4+9-7
+    :  (3*(4+9 7
+    :   3*(4+9  
+    :   3 (4+9  
+    :      4+9  
+    :      4 9  
+
+    Okay, maybe not the best rendering, but it'll do in a pinch.  Actually, let's take a second to
+    appreciate this.  This may not be the tree rendering you're used to, but you can visualize the
+	shape of the tree from the depth of the nodes.  Moreover it's pretty cheap to render from the
+	scan of the parent vector.  With a list based tree, the inner nodes are sort of buried in
+	there and harder to get your hands on.
+
+	We can see that the operators are now a level up from the things they're operating on, but
+    we're no longer in DFS preorder.  Does this matter?  Not necessarily.  We can still iterate
+    through the tree.  In fact, we can iterate through the tree to generate the DFS preorder.
+
+    :  chld:(=0N,1_p),(,0N)!,!0
+    :  :g:{,/x[z],o[x;y]'y@z}[!#p;chld]0
+    : 0 8 1 3 2 4 6 5 7 9
+
+    Just to be clear, DFS preorder /is important/ for the algorithm that converts depth vectors to
+    parent vectors, but is /not necessarily important/ for the parent vectors themselves.
+
+    This is the order we'd like the indices to be in (the grade).  Rearranging the values vector
+    according to this grade is pretty easy:
+
+    :  :txt@:g
+    : "(-(*3(+497"
+
+    But as alluded to above, rearranging the parent vector is a little trickier.  We first reorder
+    the parent vector just as we do the values vector so that they're in the same order:
+
+    :  p@g
+    : 0 0 8 1 3 3 4 6 6 8
+
+    These are now in the right order, but we're still pointing to the old indices.  What we need to
+    point to is the new location, which amounts to the grade of the grade.
+
+    :  :p:(<g)@p@g
+    : 0 0 1 2 3 3 5 6 6 1
+    :  `0:(" ",txt)@(1+!#txt)*/:-1+2*~=':(p@)\!#p
+    : (-(*3(+497
+    :  -(*3(+497
+    :   (*3(+497
+    :    *3(+49 
+    :     3(+49 
+    :       +49 
+    :        49 
+
+    Notice that this is basically reverse Polish notation with parens being explicit about which
+    values go with which operators.  Also, since the original expression was fully parenthesized,
+    each parenthesized node has a single child.  Given this, evaluating this expression as a tree
+    isn't too bad.
+
+    : chld:(=0N,1_p),(,0N)!,!0
+    :  {(n;c):(x;y)@\:z;$[~#c;.n;"("~n;o[x;y]@*c;(.n)/o[x;y]'c]}[txt;chld]0
+    : 32
+
+    We simply descend the tree, passing through each parenthesis node to its child and evaluating
+    the operator on each of its children.  Leaves are simply eval-ed.
+
+	Before moving on, let's take a look at this last attempt to visualize the tree. Let's remove
+	the duplicates and space it out a bit.
+
+	:  `0:" "/'(" ",txt)@(1+!#txt)*/:-1+2*1_>':,/1(,1+&#*:)\=':(p@)\!#p
+	: (
+	:   -
+	:     (             7
+	:       *
+	:         3 (
+	:             +
+	:               4 9
+
+	If you stare at this for a bit, you start to want to add some characters.
+
+	:    (
+	:    │
+	:    -
+	:    ├───────────┐
+	:    (           7
+	:    │
+	:    *
+	:    ├───┐
+	:    3   (
+	:        │
+	:        +
+	:        ├───┐
+	:        4   9
+
+	We won't go into details, but it should be fairly clear that the graph we actually had isn't
+	too far off from something more traditional looking.
+
+    Let's try something else.  Say we had a simple expression that wasn't parenthesized, like
+    ~1+2+3+4~.  How do we express this as a tree?  For starters, since there is no depth, we can
+    add a dummy root and make the parent vector all zeros.  If that's not immediately obvious, it's
+    worth taking a minute to contemplate why this must be the case.
+
+    :  :p:&# \txt:"(","1+2+3+4"
+    : "(1+2+3+4"
+    : 0 0 0 0 0 0 0 0
+
+    Now how would we rewire this to do, say, right-to-left evaluation?  That is, we want "long right"
+    precedence.  If we did add parens it should look like ~(1+(2+(3+4~.  One option is to simply add
+    a paren after each operator and use the parser above.  Another is to note that each operator has
+    two children: its left (leaf) sibling and the next operator to the right.
+
+    :  :p:@[p;;:;].(+(-1+;1_,[;-1+#txt]@)@\:op;op:&"+"=txt)
+    : 0 2 0 4 2 6 4 6
+    :  `0:(" ",txt)@(1+!#txt)*/:-1+2*~=':(p@)\!#p
+    : (1+2+3+4
+    :  1+2+3+4
+    :  1 2+3+4
+    :    2 3+4
+    :      3 4
+
+    You could apply something similar to each set of siblings for a partially parenthesized expression
+    such as "(2*8*4)+(2*(3+4+6))".
+
+    Now is this the way you want to go about creating a general expression parser?  Maybe not.
+    It's not immediately clear how to handle monadic operators like negation going down this path.
+    But then again, maybe it's not so bad.  The point of this exercise is to introduce new ways of
+    thinking about tree construction.  I personally have found this "erector set" approach pretty
+    fruitful, using it to implement a [[https://github.com/gitonthescene/ngnk-libs/blob/master/json/json.k][json parser]] and a parser of [[https://github.com/gitonthescene/ngnk-libs/blob/master/regex/re.k][regular expressions]] as well as
+    a [[https://github.com/gitonthescene/ngnk-libs/blob/master/trees/lambda-calculus.k][lambda calculus evaluator]].
+
+    This last might be a good example of the shortcomings of this approach mentioned above.  It's
+    basically a term rewriting exercise, which means a lot of updates to the tree and thus
+    changes in the size of the tree, which in turn means a lot of copying of the vectors involved, as
+    described above.  It works, but perhaps is not the best fit.
+
+* More ramblings to come...

+ 33 - 0
ngnk-libs/trees/bst-example.k

@@ -0,0 +1,33 @@
+\l bst.k
+\d tree
+\l trees.k
+\d .
+para:`0:
+
+drw:{(p;n):((<g)@;::)@'0^x[0 2]@\:g:gt.(0^x 0 1); tree.shw[$n;p]}
+dr:drw@[;1 2 2]@
+
+data:-15?50
+t:(e;!0;!0;!0)
+rs:t ins\data
+r:*|rs
+
+para"orig"
+dr r
+drw r[1 2],,!#r[1]
+drw r 1 2 3
+
+para"Insertion"
+dr@/:1_rs;
+para""
+
+para"Indices"
+drw r[1 2],,!#r[1]
+para""
+
+para"Height"
+drw r[1 2 3]
+para""
+
+para"Delete middle index"
+dr del[r;1]

+ 50 - 0
ngnk-libs/trees/bst.k

@@ -0,0 +1,50 @@
+e:(,0N)!,!0
+
+ch:{$[z~r:c@*y@x[1] c:x[0;z],z;0N;r]}   / (lf;rg):ch[(^'[;?p]@=p;n)]@/:(<:;>:)
+btm:{*|(~^*:){x[y],y:*y}[x]/(y,0N)}     / nxt:btm[lf]rg@
+gt:{[p;n]{z,,/o[x;y]'@/1(<x@)\y z}[n;(=0N,1_p),(,0N)!,!0;0]}
+
+noreb:{y;x}
+avlreb:{rot[x;*{(~^i)&</-1 2>\:*bal[x;i:*y]}[x](1_)/y]}
+reb:noreb
+
+/ x:(d;p;n;h) y:item z:roots -> insertion index
+insi:{$[(l:y<x[2;*z])&~^c:ch[x 0 2;<:;*z];o[x;y;c,z]
+        l|~1=#c:^[x[0;*z];c,*z];z
+        o[x;y;c,z]]}
+
+ins:{r:@[x;1+!3;,;(+/*i:insi[x;y;,0];y;0)]
+     reb[@[r;0 1 3;:;(@[r 0;*i;(0=)_,;#x[1]];@[r 1;0;:;0N];@[r 3;i;|;1+!#i])];i]}
+
+del0:{@[x;1 3;:;(@[x 1;y;:;-1]
+                 x[3]{@[y;z;:;1+|/-1,y x z]}[x 0]/(^:)_(x[1]@)\x[1]y)]}
+del1:{xx:@[x;!2;:;(@[x 0;x[1]y;,;*c]
+                   @[x 1;y,*c:x[0;y];:;-1,x[1]y])]
+       @[xx;3;:;x[3]{@[y;z;:;1+|/-1,y x[z]^z]}[xx 0]/(^:)_(x[1]@)\y]}
+del2:{(lf;rg):ch[x 0 2]@/:(<:;>:)
+      x:@[x;2;:;@[x 2;i;:;x[2]@|i:y,s:btm[lf]rg y]]
+      x:@[x;0;:;@[@[x 0;x[1]y;,;y];x[1]s;^;s]]
+      (del0;del1)[#x[0;s]][x;s]}
+
+del:{x:@[x;0;:;@[x 0;x[1]y;^;y]]
+     x:(del0;del1;del2)[#x[0;y]][x;y]
+     x:@[x;1+!3;:;(-+/d)_/:((<g)@;::;::)@'x[1+!3]@\:g:<d:(~^x 1)&0>x 1]
+     @[x;0;:;(!/(<g)(!x[0];.x[0]))_#x[1]]}
+
+rot0:{xx:@[x;0 1;:
+      (@[@[x 0;(#x 1)^x[1]y;^;y];(#x 1;#x 1;y 1)^'x[1]y;(^:)_,;(y 1;y 2;y 0)]_#x 1
+       -1_@[x[1],0N;^[#x 1;y];:;(y 1;x[1]y 0;y 0)])]
+       @[xx;3;:;xx[3]{@[y;z;:;1+|/-1,y x[z]^z]}[xx 0]/(^:)_(xx[1]@)\y 0]}
+
+swp:{@[x;0 1;:;(@[@[x 0;x[1]y;^;y];(#x 1)^x[1]y;,;|y]_#x 1
+                @[x 1;x[0;y]^y;:;|y])]{@[x;|y;:;x y]}\:y}
+
+rot:{$[</-1 2>\:*b:bal[x;y];:x;]
+  (lf;rg):ch[x 0 2]@/:(<:;>:);
+  $[~h=/1 0=0<bal[x;c:@[(lf;rg);h:=/1 0=b>0]y]
+      (c;x):(is[1];rot0[x;is:{y x}\c,(lf;rg)@1~:\~h]);]
+  $[~y;(x;y):(swp[x;y,c];c);]
+  rot0[x;{y x}\y,ch[x 0 2]@/:(<:;>:)@1~:\h]}
+
+bal:{-/+(-1^x[3]@cd@!2;x[2]@y,*cd:x[0;y]^y)} / bal[bst;ix;] -> (heavy;side)
+

+ 57 - 0
ngnk-libs/trees/examples.k

@@ -0,0 +1,57 @@
+\l trees.k
+
+TT:"ABEFG56N8PV"
+P0:P:0 0 1 2 20 4 4 6 7 20 9 10 10 10 9 14 15 20 17 18 20
+R:dfo[P]
+T:(TT 3 2 1 10 4 10 2 0 7 4 2 10 9 10 2 0 7 2 0 7 3)@<R
+
+// reorder the indices based on depth first pre-ordering
+P:redo[P;R]
+D:#'(P@)\'!#P
+`0:"\n -- vertical tree --\n"
+`0:t0[P;D],'"  ",/:T
+
+(tr;l):t1[2]@P
+`0:"\n -- horizontal left adjusted --\n"
+`0:1_,/+(tr;T@l)
+
+(tr;l):t2[2]@P
+`0:"\n -- horizontal center adjusted --\n"
+`0:1_,/+(tr;T@l)
+
+// Generate a random depth vector with x nodes (min 2)
+rd:{0,(0|x-2){x+@/1(*1?#:)\1-&(1|-_-0.25*d),1+&d:1|x}\1}
+
+sh:{(tr;l):((t1;t2)[x-1])[2]@y
+  `0:(,""),1_,/+(tr;(,/`c$97+!#y)@l)}
+
+`0:"\n -- random trees horiz left adj --"
+sh[1]'p'rd'10#10+!5;
+`0:"\n -- random trees horiz ctr adj --"
+sh[2]'p'rd'10#10+!5;
+
+// Generate random length labels
+/ pad the picture to make room
+/ center the labels
+sh:{ls:`c$97+(1+(#y)?5)?\:26
+  (tr;l):pad[p]((t1;t2)[x-1])[p:1+|/#'ls]@y
+  `0:(,""),/+(tr;ctl@ls@l)}
+
+`0:"\n -- random trees horiz left adj random width labels --"
+sh[1]'p'rd'10#10+!5;
+`0:"\n -- random trees horiz ctr adj random width labels --"
+sh[2]'p'rd'10#10+!5;
+
+`0:"\n\n -- random code --\n"
+`0:C:"@//1(*1?#:)\1-&(1|-_- 0.5*d),1+&d:1|=':{x}"
+
+/ render parse tree
+
+prstree:{ (T;D;L):+{$[`A~t:@y;((t;x;`);(`;x+1;*y)),,/o[x+1]'1_y;,(t;x;y)]}[0]`p@x
+  (D@(!#D)^1+w; L@:(!#L)^w; T@:(!#T)^w:&`A=T)}
+
+k:{$[`c~_@x;x;`k@x]}
+
+`0:"\n -- render parse tree --\n"
+(D;L;T):prstree C
+shw[k'L;p@D]

+ 263 - 0
ngnk-libs/trees/hsuthesis.k

@@ -0,0 +1,263 @@
+\l trees.k
+`0:("Tree data cut-and-pasted from Aaron Hsu's Thesis (https://scholarworks.iu.edu/dspace/handle/2022/24749)"
+   "is rendered with the algorithms here.  Feel free to match against those rendered in the thesis.")
+
+/ show tree x (given by DFS preorder parent vector) with labels y
+sh:{(tr;l):pad[p]t1[p:1+|/#'y]x;`0:"P:",`k@x;`0:(,""),/+(tr;ctl@y@l)}
+
+TL:"ABEFGLMNOPVZ"
+
+`0:"p.61"
+:D:0 1 2 1 2 3 2 1 2 3 3 2 3 3 2
+sh[p@ \D;$!#D]
+
+`0:"p.71"
+:D:0 1 2 3 1 2 3 3 4 1 2 3 4 5 6 5 5 6 3 4 5 6 5 5 6 3 4
+T:3 1 0 7 1 2 9 0 10 1 3 1 2 0 10 9 0 10 1 2 0 10 9 0 10 0 10
+K:1 0 0 0 0 1 0 1 0 1 1 0 2 1 0 0 1 0 0 2 1 0 0 1 0 1 0
+X:0 -5 0 -6 -7 0 -8 0 -5 -9 0 -10 0 0 -1 -11 0 -5 -12 0 0 -10 -8 0 -10 0 -12
+sh[p@ \D;,/'+(TL@T;$K)]
+
+`0:"p.73"
+:D:0 1 2 3 1 2 3 3 4 1 2 3 4 5 6 5 5 6 3 4 5 6 5 5 6 3 4
+sh[p@ \D;$!#D]
+
+`0:"p.79"
+D:0 1 2 3 1 2 3 3 4 1 2 3 4 5 6 5 5 6 3 4 5 6 5 5 6 3 4
+P:0 0 1 2 0 4 5 5 7 0 9 10 11 12 13 12 12 16 10 18 19 20 19 19 23 10 25
+`0:"depth:"
+sh[p@ \D;$!#D]
+`0:"parent:"
+sh[P;$P]
+
+`0:"p.80"
+P:0 0 1 2 0 4 5 5 7 0 9 10 11 12 13 12 12 16 10 18 19 20 19 19 23 10 25
+T:3 1 0 7 1 2 9 0 10 1 3 1 2 0 10 9 0 10 1 2 0 10 9 0 10 0 10
+K:1 0 0 0 0 1 0 1 0 1 1 0 2 1 0 0 1 0 0 2 1 0 0 1 0 1 0
+X:0 -5 0 -6 -7 0 -8 0 -5 -10 0 -11 0 0 -1 -12 0 -5 -14 0 0 -11 -8 0 -11 0 -14
+sh[p@ \D;TL@T]
+
+`0:"p.82"
+D:0 1 2 1 2 3 2 1 2 3 3 2 3 3 2
+sh[p@ \D;$!#D]
+
+`0:"p.93"
+P:0 0 1 2 0 4 5 5 7 0 9 10 11 12 13 12 12 16 10 18 19 20 19 19 23 10 25
+T:3 1 0 7 1 2 9 0 10 1 3 1 2 0 10 9 0 10 1 2 0 10 9 0 10 0 10
+sh[P;TL@T]
+
+`0:"p.100"
+I:0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
+D:0 1 2 3 4 5 4 5 6 7 6 6 7 4 5 6 5 6 7 8
+P:0 0 1 2 3 4 3 6 7 8 7 7 11 3 13 14 13 16 17 18
+T:3 1 3 2 0 10 3 2 0 10 9 0 10 2 0 10 3 2 0 10
+K:1 2 1 2 1 0 1 2 1 0 0 1 0 2 1 0 1 2 1 0
+N:0 -5 0 0 0 -2 0 0 0 -2 -6 0 -1 0 0 -2 0 0 0 -2
+
+I,:20 21 22 23 24 25 26 27 28 29 30 31 32 33 34
+D,:7 7 8 5 6 7 6 7 8 9 8 8 9 6 7
+P,:17 17 21 13 23 24 23 26 27 28 27 27 31 23 33
+T,:9 0 10 2 0 10 3 2 0 10 9 0 10 0 10
+K,:0 1 0 2 1 0 1 2 1 0 0 1 0 1 0
+N,:-6 0 -1 0 0 -2 0 0 0 -2 -6 0 -1 0 -1
+`0:"depth:"
+sh[p@ \D;TL@T]
+`0:"parent:"
+sh[P;TL@T]
+
+I:0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
+P:0 0 1 35 3 4 3 36 7 8 7 7 11 3 13 14 13 37 17 18 17 17
+T:3 1 10 2 0 10 10 2 0 10 9 0 10 2 0 10 10 2 0 10 9 0
+K:1 2 1 2 1 0 1 2 1 0 0 1 0 2 1 0 1 2 1 0 0 1
+N:0 -5 35 0 0 -2 36 0 0 -2 -6 0 -1 0 0 -2 37 0 0 -2 -6 0
+R:0 0 0 35 35 35 35 36 36 36 36 36 36 35 35 35 35 37 37 37 37 37
+
+I,:22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38
+P,:21 13 23 24 23 38 27 28 27 27 31 23 33 35 36 37 38
+T,:10 2 0 10 10 2 0 10 9 0 10 0 10 3 3 3 3
+K,:0 2 1 0 1 2 1 0 0 1 0 1 0 1 1 1 1
+N,:-1 0 0 -2 38 0 0 -2 -6 0 -1 0 -1 0 0 0 0
+R,:37 35 35 35 35 38 38 38 38 38 38 35 35 0 35 35 35
+`0:"resort parent vector"
+R:dfo[P]
+P:redo[ \P;R]
+sh[P;TL@T@<R]
+
+`0:"p.108"
+`0:"before:"
+I:0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+P:0 0 1 15 3 3 5 6 15 8 15 10 11 10 10 15
+T:3 1 10 4 10 1 0 7 0 7 2 0 7 9 10 3
+K:0 1 1 0 0 0 0 0 0 0 2 0 0 1 0 1
+N:0 -5 15 0 -1 -6 0 -7 0 -8 0 0 -9 -10 -1 0
+R:0 0 0 15 15 15 15 15 15 15 15 15 15 15 15 0
+sh[P;TL@T]
+
+`0:"after:"
+I:0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
+P:0 0 1 2 19 4 4 6 7 8 19 10 11 19 13 14 15 14 14 19
+T:3 2 1 10 4 10 2 1 0 7 2 0 7 2 2 0 7 9 10 3
+K:0 -1 1 1 0 0 0 0 0 0 0 0 0 0 2 0 0 1 0 1
+N:0 -5 -5 19 0 -1 -6 -6 0 -7 0 0 -8 0 0 0 -9 -10 -1 0
+R:0 0 0 0 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 0
+sh[P;TL@T]
+
+`0:"p.110"
+P:0 0 1 15 3 3 5 6 15 8 15 10 11 10 10 15
+T:3 1 10 4 10 1 0 7 0 7 2 0 7 9 10 3
+sh[P;TL@T]
+
+/
+/ ----  Not valid yet ----
+`0:"p.112"
+P:0 0 0 1 15 3 3 3 5 6 15 15 8 15 15 10 11 10 10 15
+T:3 1 1 10 4 10 1 1 0 7 0 0 7 2 2 0 7 9 10 3
+K:0 1 1 1 0 0 0 0 0 0 0 0 0 2 2 0 0 1 0 1
+N:0 -5 -5 15 0 -1 -6 -6 0 -7 0 0 -8 0 0 0 -9 -10 -1 0
+R:0 0 0 0 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 0
+`0:"resort parent vector"
+
+R:dfo[P]
+\
+
+`0:"p.113"
+P:0 0 0 2 19 4 4 4 7 8 19 19 11 19 19 14 15 14 14 19
+R:0 0 0 0 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 0
+I:2 11 14 7
+sh[P;$R]
+
+`0:"p.114"
+P:0 0 0 2 19 4 4 4 7 8 19 19 11 19 19 14 15 14 14 19
+sh[P;$!#P]
+
+P:0 0 1 2 19 4 4 6 7 8 19 10 11 19 13 14 15 14 14 19
+sh[P;$!#P]
+
+`0:"p.117"
+`0:"before:"
+I:0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
+P:0 0 1 2 20 4 4 6 7 20 9 10 10 10 9 14 15 20 17 18 20
+T:3 2 1 10 4 10 2 0 7 4 2 10 9 10 2 0 7 2 0 7 3
+K:0 -1 1 1 0 0 0 0 0 0 2 0 1 0 0 0 0 0 0 0 1
+N:0 -5 -5 20 0 -1 0 0 0 0 0 -2 -6 -1 0 0 -7 0 0 -8 0
+R:0 0 0 0 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 0
+sh[P;TL@T]
+
+`0:"after:"
+I:0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
+P:0 0 1 2 20 20 5 6 7 20 20 9 9 9 10 14 15 20 17 18 20
+T:3 2 1 10 10 4 2 0 7 2 4 10 9 10 2 0 7 2 0 7 3
+K:0 -1 1 1 0 0 0 0 0 2 0 0 1 0 0 0 0 0 0 0 1
+N:0 -5 -5 20 -1 -1 0 0 0 0 0 -2 -6 -1 0 0 -7 0 0 -8 0
+R:0 0 0 0 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 0
+sh[P;TL@T]
+
+`0:"p.118"
+P:0 0 1 2 20 4 4 6 7 20 9 10 10 10 9 14 15 20 17 18 20
+sh[P;$!#P]
+
+`0:"p.121"
+I:0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
+P:0 0 1 2 53 4 5 5 5 8 9 0 11 12 54 14 15 15 15 18
+T:3 2 1 10 2 2 10 9 2 0 9 2 1 10 2 2 10 9 2 0
+K:0 -1 1 1 0 2 0 1 3 3 0 -1 1 1 0 2 0 1 3 3
+N:0 -5 -5 53 0 0 -1 -6 0 0 -7 -8 -8 54 0 0 -1 -6 0 0
+R:0 0 0 0 53 53 53 53 53 53 53 0 0 0 54 54 54 54 54 54
+
+I,:20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39
+P,:19 18 21 0 23 24 55 26 27 27 27 30 31 30 33 30 35 0 37 38
+T,:9 0 9 2 1 10 2 2 10 9 2 0 9 0 9 0 9 2 1 10
+K,:0 3 0 -1 1 1 0 2 0 1 3 3 0 3 0 3 0 -1 1 1
+N,:-7 0 -7 -9 -9 55 0 0 -1 -6 0 0 -7 0 -7 0 -7 -10 -10 56
+R,:54 54 54 0 0 0 55 55 55 55 55 55 55 55 55 55 55 0 0 0
+
+I,:40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56
+P,:56 40 41 41 41 44 45 44 47 44 49 44 51 53 54 55 56
+T,:2 2 10 9 2 0 9 0 9 0 9 0 9 3 3 3 3
+K,:0 2 0 1 3 3 0 3 0 3 0 3 0 1 1 1 1
+N,:0 0 -1 -6 0 0 -7 0 -7 0 -7 0 -7 0 0 0 0
+R,:56 56 56 56 56 56 56 56 56 56 56 56 56 0 0 0 0
+`0:"resort parent vector"
+R:dfo[P]
+P:redo[ \P;R]
+sh[P;TL@T@<R]
+
+P:0 0 1 2 53 4 5 5 5 8 9 0 11 12 54 14 15 15 15 18 19 18 21 0
+P,:23 24 55 26 27 27 27 30 31 30 33 30 35 0 37 38 56 40 41
+P,:41 41 44 45 44 47 44 49 44 51 53 54 55 56
+
+`0:"resort parent vector"
+R:dfo[P]
+P:redo[ \P;R]
+sh[P;TL@T@<R]
+
+`0:"p.123"
+`0:"before:"
+I:0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
+P:0 0 1 2 24 4 5 6 6 6 24 10 11 12 13 13 12 16 16 16
+T:3 2 1 10 2 1 2 10 9 10 2 2 2 8 9 9 2 10 9 0
+K:0 -1 1 1 -1 0 2 0 1 0 0 2 1 2 2 1 2 0 1 0
+N:0 -5 -5 24 -6 -6 0 -2 -7 -1 0 0 0 0 -8 -9 0 -6 -10 0
+R:0 0 0 0 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24
+
+I,:20 21 22 23 24
+P,:19 11 11 22 24
+T,:7 9 0 7 3
+K,:0 1 0 0 1
+N,:-11 -10 0 -12 0
+R,:24 24 24 24 0
+sh[P;TL@T]
+
+`0:"after:"
+I:0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
+P:0 0 0 0 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24
+T:3 10 1 2 10 9 10 2 1 2 0 9 0 9 10 2 9 9 8
+K:0 1 1 -1 0 1 0 2 0 -1 0 1 0 1 0 2 1 2 2
+N:0 24 -5 -5 -1 -7 -2 0 -6 -6 0 -10 0 -10 -6 0 -9 -8 0
+R:0 0 0 0 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24
+
+I,:19 20 21 22 23 24
+P,:24 12 24 24 10 24
+T,:2 7 2 2 7 3
+K,:1 0 2 0 0 1
+N,:0 -11 0 0 -12 0
+R,:24 24 24 24 24 0
+sh[P;TL@T]
+
+`0:"p.131"
+D:0 1 2 3 4 5 6 7 7 7 5 6 6 6 3 4 5 5 5 3 4 5 5 5 6 3 4 4 0 0 0 0 0 0 0 0 0
+sh[p@ \D;$!#D]
+
+`0:"p.137"
+I:0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
+P:0 0 0 0 44 44 44 4 44 44 44 44 45 45 12 45 45 45 45 45
+T:3 10 1 2 0 1 2 7 0 10 2 2 0 1 7 2 0 10 2 2
+K:0 1 1 -1 0 0 -1 0 0 1 1 0 0 0 0 -1 0 1 1 0
+N:1 44 -5 -5 0 -6 -6 -7 0 45 0 0 0 -8 -9 -8 0 46 0 0
+R:0 0 0 0 44 44 44 44 44 44 44 44 45 45 45 45 45 45 45 45
+
+I,:20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
+P,:46 46 46 46 46 46 46 46 46 46 26 46 46 46 46 46
+T,:10 9 10 2 1 2 0 9 10 2 7 1 2 10 9 10
+K,:0 1 0 2 0 -1 0 1 0 2 0 0 -1 0 1 0
+N,:-6 -11 -8 0 -10 -10 0 -11 -10 0 -12 -8 -8 -8 -11 -8
+R,:46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46
+
+I,:36 37 38 39 40 41 42 43 44 45 46
+P,:46 46 46 46 46 46 46 46 44 45 46
+T,:2 1 2 10 9 10 2 2 3 3 3
+K,:2 0 -1 0 1 0 2 0 1 1 1
+N,:0 -6 -6 -6 -11 -6 0 0 1 1 3
+R,:46 46 46 46 46 46 46 46 0 44 45
+`0:"resort parent vector"
+R:dfo[P]
+P:redo[ \P;R]
+sh[P;TL@T@<R]
+
+`0:"p.184"
+P:0 0 1 2 0 4 5 5 7 0 9 10 11 12 13 12 12 16 10 18 19 20 19 19 23 10 25
+sh[P;$!#P]
+
+`0:"p.185"
+P:0 0 1 2 0 4 5 5 7 8 9 8 8 12 5 14
+sh[P;$!#P]

+ 194 - 0
ngnk-libs/trees/lambda-calc-example.k

@@ -0,0 +1,194 @@
+\l lambda-calculus.k
+
+para"lambda expression:"
+para s:"(^x.x y x y)z z (^y.z z)"
+para""
+
+para"parsed into a tree"
+:p:prs[s]
+para""
+
+para"displayed in a friendly way"
+dsp[s;p]
+para""
+
+para"pruned of excess nodes"
+:(n;p):alpha/prn[s]
+dsp[n;p]
+para""
+
+para" .. as a table"
+tbl[n;,p]
+para""
+
+para"table showing where parens are"
+tbl[n;(p;"("=n)]
+para""
+
+para"different table showing where parens are"
+tblb[n;"("=n]
+para""
+
+para"table showing where z's are in last lambda"
+tblmb[n;(m;("z"=n)*m:p=*|&"^"=n)]
+para""
+
+para"beta reduced"
+:(n;p):beta/prn[s]
+dsp[n;p]
+para""
+
+para"beta reduced (using currying)"
+dsp/beta/prn s
+para""
+
+para"alpha conversion"
+:(n;p):prn \"((^x.xxx)(^x.xxx))"
+alpha[n;p]
+para""
+
+para"add back in closing parentheses"
+dspf[n;p]
+para""
+
+para"actually two trees"
+para"  node at index 8 is its own parent and thus a root"
+tbl[s;,prs \s:"(^x.xxx)(^x.xxx)"]
+
+para""
+
+para"remove redundant parentheses"
+dspf/(n;p):prn "(((^x.(x(x)x))(^x.(xxx))"
+dspf/rrp/(n;p)
+para""
+
+para"some expressions"
+para"zero: ",z:"(^f.^x.x)"
+para"successor: ",sc:"(^n.^f.^x.f(nfx))"
+para"one!: ",one:sc,z
+para""
+
+para"alpha convert, do one reduction"
+para"  note that beta reduction automatically adds a root node"
+dspf/alpha/prn "(",one,")"
+dspf/beta/alpha/prn one
+para"  canonaclize names"
+dspf/alpha/beta/alpha/prn one
+
+para""
+
+para"converge..."
+dspf/alpha/(beta/alpha/)/prn one
+para""
+
+para"more ints"
+:i:{"(",y,x,")"}\(,z),5#,sc
+para""
+
+para"reduce three showing steps"
+para"  three applications of a ..."
+dspf/'rr:alpha/'(beta/alpha/)\alpha/prn i[3]
+para""
+
+\d t
+\l trees.k
+\d .
+
+para"pictures!"
+para"  we don't canoncialize here (not needed) for clarity"
+rr:beta/\alpha/prn i[3]
+shw:{(tr;lb):t.pad[pd]t.t2[pd:|/#'x]y; `0:,/+(tr;t.ctl@x@lb)}
+shw/'rr;
+para""
+
+para"numerical variables"
+loadNumerical[]
+:(n;p):prn \"(((^x.(x(x)x))(^x.(xxx))"
+dspf/(n;p)
+
+para"translate to characters"
+ALPHA@dspf/(n;p)
+para""
+
+para"alpha reduction still works"
+ALPHA@dspf/alpha/(n;p)
+para""
+
+para"beta reduction too"
+ALPHA@dspf/beta/alpha/(n;p)
+para""
+
+para"Can now do more complicated reductions..."
+para""
+rt:{x/,'"()"}
+T:rt["^x.^y.x"]
+F:rt["^x.^y.y"]
+A:rt["^p.^q.p q p"]
+O:rt["^p.^q.p p q"]
+SX:rt["^n.^f.^x. f(nfx)"]
+ADD:rt["^m.^n.^f.^x.m f (nfx)"]
+MUL:rt["^m.^n.^f.m(nf)"]
+ZP:rt["^n.n",rt["^z.",F],T]
+TIMES:!0
+clrt:{TIMES::!0}
+ev:{(r;n):prn rt[x]
+  dspf/alpha/{beta/alpha[x;y]}//(r;n)}
+    /dspf/alpha/{c:`t@0;rr:beta/alpha[x;y];TIMES,:-c-`t@0;rr}//(r;n)}
+
+TF:(dspf/alpha/prn@)'(T;F)
+
+para"OR"
++1("TF"TF?/:ev'.:')\("O,",","/,')'"TF"@+!2 2
+para""
+
+para"AND"
++1("TF"TF?/:ev'.:')\("A,",","/,')'"TF"@+!2 2
+para""
+
+para"numbers"
+N0:{rt y,x}\(,F),15#,SX
+N:ev'N0
+ALPHA@N@!5
+para""
+
+para"addition 10~7+3"
+~/(N[10];ev ADD,N0[7],N0[3])
+para""
+
+para"multiplication"
+N[6]~ev MUL,N0[2],N0[3]
++t,,~'/(,N@(*/)t),,ev'MUL,/:,/'+N0@t:!5 3
+para""
+
+para"is zero"
++1("TF"TF?/:ev'ZP,/:N0@)\0 1 3 7
+para""
+
+para"predecessor"
+PR:rt["^n.n",rt["^g.^k.",ZP,rt["g",N0[1]],"k",rt[SX,"(gk)"]],rt["^v.",N0[0]],N0[0]]
+
+N[2]~ev e: PR,N0[3]
+para""
+
+para"predecessor 2"
+PR:rt["^x.^y.^f.fxy"]
+FST:rt["^p.p",T]
+SND:rt["^p.p",F]
+SFT:rt["^p.",PR,rt[SX,rt[FST,"p"]],rt[FST,"p"]]
+base:rt[PR,N0[0],N0[0]]
+PRE:rt["^n.",SND,rt["n",SFT,base]]
+
+~/(N[13];ev PRE,N0[14])
+para""
+
+Y:rt["^f.(^x.f(xx))(^x.f(xx))"]
+TH:,/2#,rt["^x.^y.y(xxy)"]
+X:rt["^f.(^x.xx)(^x.f(xx))"]
+TRI:rt[TH,rt["^a.^n.",ZP,"n",N0[0],rt[ADD,"n",rt["a",rt[PRE,"n"]]]]]
+FAC:rt[TH,rt["^a.^n.",ZP,"n",N0[1],rt[MUL,"n",rt["a",rt[PRE,"n"]]]]]
+
+rr:ev \TRI,ALPHA@N[5]
+~/(N[15];rr)
+
+rr:ev \FAC,ALPHA@N[3]
+~/(N[6];rr)

+ 186 - 0
ngnk-libs/trees/lambda-calc-walkthrough.txt

@@ -0,0 +1,186 @@
+"Woo hoo!"                        / Let's parse lambda expressions
+:s:"(^y.z y)"                     / To keep things ASCII, we'll use a caret to denote a lambda
+                                  / spaces separate variables and for now everything will be a single character
+p:0 0 1 1 1 1 1 0                 / We're trying to produce a "parent vector"
+                                  / Here each value in this vector indicates the parent of the node at that index
++(s i;i:(1;p[1]))                 / The parent of the lambda is the open parenthesis
++(s i;i:(2;p[2]))                 / The parent of the bound variable y is the lambda, etc.
+                                  / Let's generate this from the input string
+:t:"(hey(you)(whats(new))dude)"   / But first let's parse characters at depth defined by parentheses
+{$["("=y;x+1;")"=y;x-1;x]}\[0;t]  / A simple scan like this calculates the depth
+                                  / But we want to build a parent vector ...
+                                  / The parent of any node is the index of the most recent open paren
+                                  / This means that when we reach a closed paren,
+                                  / the following nodes have the previous open paren as a parent
+                                  / Stack!
+                                  / One more thing. At each node we grow the parent vector.
+                                  / The index of the next added node is just the length
+                                  / of the parent vector before it was added, or one less after.
+`0:`k'{(s;p):x;p,:*|s;$["("=y;s,:-1+#p;")"=y;s:-1_s;];(s;p)}\[(,0;!0);t]
+                                  / We don't want a scan, but this shows how the result is constructed
+:p:*|{(s;p):x;p,:*|s;$["("=y;s,:-1+#p;")"=y;s:-1_s;];(s;p)}/[(,0;!0);t]
+                                  / We've generated a parent vector for the tree indicated by
+                                  / nested levels of parentheses
+p@!#p                             / This is the list of parents of all the indices
+p                                 / .. of course this is just p
+p[p]                              / Here are all the grandparents of each index, etc.
+`0:`k'(p@)\!#p                    / Of course with a single tree all nodes lead back to the root
+`0:`k'1&(p@)\!#p                  / We can make a mask out of this picture
+`0:`k'(1+!#p)*/:1&(p@)\!#p        / Multiply each row by the indices offset by one
+`0:(" ",t)@(1+!#p)*/:1&(p@)\!#p   / And then prepend our string with a space to represent the zeros
+                                  / And we get a nice little picture of the depth of each node
+                                  / Add a top row with all 1's in the mask so we also get the original string
+`0:(" ",t)@(1+!#p)*/:(,1+&#p),1&(p@)\!#p
+                                  / Notice that the closed paren is a child of its matching open paren.
+                                  / Can you figure out why?
+                                  / We can clean that up, but it won't matter that much.
+:t:"(hey)(you)"                   / Here's a different "tree" and its parent
+:p:*|{(s;p):x;p,:*|s;$["("=y;s,:-1+#p;")"=y;s:-1_s;];(s;p)}/[(,0;!0);t]
++(t i;i:(5;p[5]))                 / Small problem here..  The second set of parens is not actually a child of the first.
+                                  / This is actually a "forest" and not a tree.  Each tree's root should be its own parent
+                                  / The fix is first to not default to 0 as the initial root.
+                                  / Instead we use null to represent "to be determined"
+:p:*|{(s;p):x;p,:*|s;$["("=y;s,:-1+#p;")"=y;s:-1_s;];(s;p)}/[(,0N;!0);t]
+                                  / And then we replace the nulls with the index in which they occur
+                                  / since root nodes should be their own parents
+:p:^'/|1(*/1(!#:)\^:)\*|{(s;p):x;p,:*|s;$["("=y;s,:-1+#p;")"=y;s:-1_s;];(s;p)}/[(,0N;!0);t]
+\l lambda-calculus.k
+dsp
+dsp[t;p]
+:s:"(^y.z y)"                     / Parsing lambdas!
+:p:0 0 1 1 1 1 1 0                / The plan is to add to paren parsing by putting everything after
+dsp[s;p]                          / a lambda as a child of the lambda, all as siblings
+                                  / We can just toss the current index on the parent stack for "^" as well
+`0:`k'{(s;p):x;p,:*|s;$["("=y;s,:-1+#p;"^"=y;s,:-1+#p;")"=y;s:-1_s;];(s;p)}\[(,0;!0);s]
+                                  / The trouble is that we only pop 1 element off the stack with each ")"
+                                  / Here we've accumulated a bunch of items which should all be popped.
+                                  / The idea is to count how many need popping and pop that count.
+prs                               / The code does exactly that.
+                                  / Going back to our earlier example for parsing just parentheses
+(t;p:*|{(s;p):x;p,:*|s;$["("=y;s,:-1+#p;")"=y;s:-1_s;];(s;p)}/[(,0;!0);t:"(hey(you)(whats(new))dude)"])
+                                  / We don't really need all the closed parens any more
+                                  / The structure is clear without them.
+                                  / But if we just remove them and the corresponding indices in the parent vector
+                                  / then the indices in the parent vector will be off.
+                                  / We need a permutation of the indices so that everything besides the
+                                  / closed parens are packed together, so we can just chuck the unwanted stuff
+tblb[t;")"=t]                     / First we mark the closed parens for deletion
+tblb                              / (tblb just prints a string with pointers according to a boolean mask)
+:ro:<m:")"=t                      / Then we take the grade of that Boolean vector
+m@ro                              / Now the 1's are all at the back
+t@ro                              / And so are the parens.  But what about the parent vector?
+p@ro                              / This puts the indices in the right place, but points to the old location
+(<ro)@p@ro                        / Picking these indices out of the reverse permutation does the trick
+:(t;p):(-#&m)_/:(t@ro;(<ro)p@ro)  / Dropping the number of 1's we had from the back finishes it off
+p@t?/:"nd"                        / It may look like "newdude" is all at the same level
+dsp[t;p]                          / but they have different parents
+                                  / All of the structure is now in the parent vector and not the list of nodes
+prn                               / prn takes a lambda expression, generates a parent vector and then prunes unneeded nodes
+srt
+prn t:"(^x.x)(^y.y)"
+dsp/prn t:"(^x.x)(^y.y)"
+:t:"(hey(you)(whats(new))dude)"   / Going back to our simpler example...
+:p:^'/|1(*/1(!#:)\^:)\*|{(s;p):x;p,:*|s;$["("=y;s,:-1+#p;")"=y;s:-1_s;];(s;p)}/[(,0N;!0);t]
+:(t;p):(-#&m)_/:(t@ro;(<ro)p@ro)
+&"("=t                            / Let's try to find the scope of the tree starting at the third paren
+tbl[t;("("=t;p)]                  / That's at index 8. All of its immediate children have an 8 in the parent vector
+tbl                               / (tbl just lines up the columns of several lists with a row of indexes at the top)
+t@ \&p=8                          / But the last one starts a new group, so everything in its scope counts too
+t@ \&p=14                         / We can do it again, now there is no "(" starting a new group.
+&p=17                             / Since none start a new group none are in the parent vector.
+lc                                / This is how lc finds the last leaf in the subtree under a given node
+lc[p;8]
+                                  / Now for the fun part!!  Beta reduction
+:s:"(^x.x y x y)(z z)(^y.z z)"    / For beta reduction we want to substitute into the lambda expression
+                                  / Big picture:
+dsp/ \(n;p):prn[s:s/"()"]         / We start with the pruned tree.  (Forced to a tree by surrounding with parens)
+rdx[n;p]                          / Look for "redexes"
+rrp[n;p]                          / If we find none, we simply remove redundant parens in the expression and return
+                                  / Otherwise we use the first redex to beta reduce the expression
+tblb[s;"^"=s]                     / A redex is a reducible expression, which means an application beginning with a "lambda"
+tblb[n]'(p=/:p@p@)l:&"^"=n;       / This is simply a sibling of a lambda...  (Here we show the siblings of each.)
+sb@'&'l<sb:(&'p=/:p@p@)l:&"^"=n   / ... which comes after the lambda
+tblb[n]'@[&#p;;:;1]'sb@'&'l<sb:(&'p=/:p@p@)l:&"^"=n;
+tbl[n;,p]
+rdx[n;p]                          / We return a list of indices of lambdas paired with their first sibling
+rdx
+s0:"((((^x.((xx))))((^y.y))))"    / Let's remove redundant parens from this expression
+dsp/(n0;p0):prn s0
+tblb[n0;@[&#p0;&2>#'g:=p0;:;1]]   / We start by finding which nodes have only a single child
+                                  / (i.e. have only one child in the parent vector)
+:rm:(,/"^"=n0@g@)_&2>#'g:=p0      / Then we drop those whose child is just a lambda
+                                  / since these always have only one child but are necessary
+:rm:("("=n0@)#rm                   / Then we make sure that these nodes are actually parentheses
+                                  / We'd like to simply drop them like before, but then we'll have nodes whose parents have been removed.
+                                  / So we need to find the new parent before dropping the old parent.
+                                  / In the easiest case we just lift the parent to its parent.
+                                  / But this might be being removed as well, so we have to keep moving up.
+                                  / Since the first child of any node is always one index greater than its parent,
+                                  / we can simply back up the index until we find a parent not being removed.
+:w:(&|/0,rm=\:p0)^rm              / we first find all children of nodes being removed (exclude those being removed)
+tblb[n0;@[&#p0;w;:;1]]
+(-/1(~^rm?p0@)\)/w                / Then we iteratively test if its parent is being removed and stop when it isn't
+:np:p0@(-/1(~^rm?p0@)\)/w         / The new parent is that final parent.
+tbl[n0;,p0]
+tbl[n0;,p0:@[p0;w;:;np:p0@(-/1(~^rm?p0@)\)/w]]
+                                  / Then we remove the redundant parentheses
+:(n0;p0):(-#rm)_/:srt[n0;p0;<@[&#p0;rm;:;1]]
+$[2=+/~p0;1_/:(n0;0|p0-1);(n0;p0)] / The last niggling bit is that since the root is self parenting it will never have only one child
+                                  / So when it's redundant the algorithm above won't find it.  We just shift off the parent in this case.
+:rrp[n0;p0]
+rrp
+dsp[n;p]                          / Finally we're ready for beta reduction
+:(n;p):("(",n;0,(~p=!#p)*1+p)     / We start by shifting on a root node just in case we're not given a tree
+:rd:rdx[n;p]                      / Then we see if we have any redexes, and if we do we call the main function with the first redex
+:off:*rd                          / Here rd contains the index of the lambda and of its next sibling
+:(l;e):0 1+lc[p]'off              / We find the extent of each of these nodes (add one to the end of the sibling to make the math easier)
+:idx:off+!e-off:*off              / Now we find all the indices in range for the substitution
+:idx:(2;1+idx?l)_idx              / And split them at the end of the lambda expression
+n@idx                             / lopping off the lambda and the variable leaving only the lambda body
+tbl[n;,p]                         / Then we lift all immediate children of the lambda to its parent
+tblb[n;@[&#p;(off=p@)#*idx;:;1]]  / Since we'll be removing the lambda, we want each child to end up in the group
+:p:@[p;(off=p@)#*idx;:;p[off]]
+tbl[n;,p]
+:idx[0]:(n[off+1]=n@)#idx[0]      / From here on we only need to track references to the bound variable in the body of the lambda
+:l:1 0+#'|idx                     / Now we take the lengths of each of these lists, reverse them and add one
+                                  / to the length of the body of the sibling.  i.e. the argument to the lambda
+                                  / This is because we're going to add a group to this to make sure it's a tree with a root
+`0:`k'@[&#p;*idx;,;(|l)#1]        / We prepare for reshuffling by assembling a grade to insert the substitution after each
+                                  / reference to a bound variable.  (i.e. idx[0])
+:ro:<,/@[&#p;*idx;,;(|l)#1]       / We take the inverse of this grade because we want to splice in nodes added at the back
+copy[n;p;e;l]                     / The copy function makes one copy of the substitution for each reference to the bound variable
+                                  / This leaves us with a copy of the argument tree for each occurrence of the bound variable
+                                  / Each copy is left with a null parent at its root, to be filled in with where it is spliced in
+e                                 / Remember that e is the end of the argument
+l                                 / and l has the length of the argument + 1 and the count of bound variables
+(-*l-1)#/:e#/:(n;p)               / This chops off the argument section of both the node and parent vectors
+:rr:("(";0),'(-*l-1)#/:e#/:(n;p)  / And this prepends a root node for us to hook into where the corresponding bound variable was
+(*l;#'rr)                         / Now *l should match the length of this constructed substitution
+*/l                               / We want *|l copies of this, which amounts to collecting */l of each of n and p
+:(sn;sp):(*/l)#/:rr
+                                  / But the parent vectors refer to the old location of this tree, so this needs adjusting
+#/|l                              / This is the length of each copy
+&#/|l                             / This tags each character of each copy
+(*l)*&#/|l                        / This turns the tags into offsets for each character since each copy is *l long
+e-*l                              / This is the original index of substitution (the + 1 is already factored in)
+0|sp-e-*l                         / This shifts down the parent vector values accordingly
+:rr:(#p)+(0|sp-e-*l)+(*l)*&#/|l   / Finally we find their new position when we append to the original vectors
+(*l)*!*|l                         / This is the position of the start of each copy
+:sp:@[rr;(*l)*!*|l;:;0N]          / We turn these roots into nulls to be replaced when we're splicing them in
+copy
+:(n;p):(n;p),'copy[n;p;e;l]       / Now we append this onto the original vectors
+:p:@[p;&^p;:;p@*idx]              / And swap in, at the root of each copy, the parent of its respective bound variable
+                                  / i.e. we make each copy a sibling of its respective bound variable
+:(n;p):srt[n;p;<ro]               / .. and use the previously constructed grade to move these copies into place
+tbl[n;,p]
+dsp[n;p]
+:l:(off+!2),,/idx                 / These are the *original* indices of all of the stuff no longer needed
+ro@l                              / And here's where they are after the shuffle
+(-#l)_/:srt[n;p;<@[&#p;ro@l;:;1]] / We then remove these indices as before
+beta0
+beta
+dsp/(n;p):prn s
+dsp/beta/(n;p)
+
+
+/ FIN

+ 109 - 0
ngnk-libs/trees/lambda-calculus.k

@@ -0,0 +1,109 @@
+imp:{(.0#`)`$(x,(1&#x)#"."),/:y}
+
+(P;L;C;D;N):(=)@/:(Pc;Lc;Cc;Dc;Nc):Cs:"(^). "
+LP:|/"^("=\:
+
+/ parse vector of nodes into a parent vector
+prs:{(!#x)^'*|{(s;p):x;p,:**|s
+     $["("=y;s,:,(-1+#p;1)
+       "^"=y;s,:,(-1+#p;1+*|*|s)
+       ")"=y;[s:(-*|*|s)_s;p:-1_p; p,:**|s]
+      ]
+     (s;p)}/[(,0N 0;!0);x]}
+
+/ re-sort both the values and parent indices according to a permutation
+srt:{(x@z;(<z)@y@z)}
+
+/ parse and prune unnecessary nodes
+prn:{(-#&i)_/:srt.(x;prs[x];<i:|/" .)"=\:x)}
+
+/ pleasant display of tree
+dsp:{`0:(" ",x)(1+!#x)*/:(,1+&#x),~~((y@)\!#y)-\:|\*/1(y=)\!#y}
+
+/ last child of a given node in a given parent vector
+lc:{(c;r):(-1+#x;|x)
+ *|-1_(c-r?)\y}
+
+
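+/ remove redundant parentheses from a (nodes;parents) pair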
+rrp:{[n;p]rm@:&(P n)rm:(,/L@Lc^n@g@)_&2>#'g:=p
+     p:@[p;w;:;p@(-/1(~^rm?p@)\)/w:(&|/0,rm=\:p)^rm]
+     rm,:rm2:(P n@)#2+(L n@)#&2=#'g:=p
+     p:@[p;w;:;p@p@w:(&|/0,rm2=\:p)]
+     (n;p):(-#rm)_/:srt[n;p;<@[&#p;rm;:;1]]
+     $[2=+/~p;1_/:(n;0|p-1);(n;p)]}
+
+
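+/ make one copy of the argument subtree per occurrence of the bound variable (roots left null)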
+copy:{[n;p;e;l]
+      (sn;sp):(*/l)#/:(Pc;0),'(-*l-1)#/:e#/:(n;p)
+      sp:@[(#p)+(0|sp-e-*l)+(*l)*(&#/|l);(*l)*!*|l;:;0N]
+      (sn;sp)}
+
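+/ one beta reduction using the redex at off (index of the lambda and of its next sibling)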
+beta0:{[n;p;off]
+      (l;e):0 1+lc[p]'off
+      idx:(2;1+idx?l)_idx:off+!e-off:*off
+      p:@[p;(off=p@)#*idx;:;p[off]]
+      idx[0]:(n[off+1]=n@)#*idx
+      l:1 0+#'|idx
+      ro:<,/@[&#p;*idx;,;(|l)#1]
+      (n;p):(n;p),'copy[n;p;e;l]
+      p:@[p;&^p;:;p@*idx]
+      (n;p):srt[n;p;<ro]
+      (-#l)_/:srt[n;p;<@[&#p;ro@l:(off+!2),,/idx;:;1]]}
+
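+/ find redexes: pairs of (lambda index; index of its next sibling)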
+rdx:{[n;p]{y;~^x}[w]#+(l;w:*'s@'&'l<s:(&'p=/:p@p@)l:&L n)}
+
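+/ beta reduce: root the tree, reduce the first redex if any, then clean up redundant parens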
+beta:{[n;p](n;p):(Pc,n;0,(~p=!#p)*1+p)
+      $[~#rd:rdx[n;p];:rrp/(n;p);]
+      rrp/beta0[n;p;*rd]}
+
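+/ alpha conversion: rename bound variables so each lambda binds a distinct name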
+alpha:{[n;p]((+(,n),,@/|1(`c$"`"+!1+|/)\?/|1?:\i*m)@'m:(~LP n)*~^i:*'(~:)_'+i*n=/:(Nc,n)@(2+i)*L n@i:(p@)\!#p;p)}
+
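+/ format the tree back into an expression string (parens and dots restored)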
+dspf:{[n;p]{$[P x@z;Pc,(,/o[x;y]'y@z),Cc
+   L x@z;Lc,c[0],Dc,1_c:,/o[x;y]'y@z
+   x@z]}[n;^'[;?p]@=p]0}
+
+/
+/ Left associate (not needed)
+la:{[n;p]g:-1_'g@!d:(#p)+=(P n@)#&(0<)#-2+#'g:^'[;?p]@=p0:p
+    $[~#d;:(n;p);]
+    (n;p):(n,(#e)#Pc;@[p,e:,/d;;:;].(g,'.d;(.(,/|1*:\)'|'d),'(!d),'.-1_'d))
+    ro:<(!#p0),&#'d
+    srt[n;p;ro]}
+\
+
+/ Leave as numbers
+
+\d numerical
+imp:(.0#`)`imp
+
+(Pc;Lc;Cc;Dc;Nc;Cs;prn):imp[""]@$`Pc`Lc`Cc`Dc`Nc`Cs`prn
+alpha:{[n;p]((+(,n),,@/|1(4+!1+|/)\?/|1?:\i*m)@'m:(~ LP n)*~^i:*'(~:)_'+i*n=/:(Nc,n)@(2+i)*L n@i:(p@)\!#p;p)}
+
+ALPHA:Cs,`c$"a"+!26
+TONUM:{[n;p](ALPHA?n;p)}
+prn0:prn
+prn:TONUM/prn0@
+
+(P;L;C;D;N):(=)@/:(Pc;Lc;Cc;Dc;Nc):Cs?(Pc;Lc;Cc;Dc;Nc)
+Nc:0N
+LP:|/(Pc;Lc)=\:
+\d .
+
+loadNumerical:{ (Pc0;Lc0;Cc0;Dc0;Nc0;Cs0):imp["numerical";$`Pc`Lc`Cc`Dc`Nc`Cs]
+ (ALPHA0;alpha0;prn0;P0;L0):imp["numerical";$`ALPHA`alpha`prn`P`L]
+ Pc::Pc0
+ Lc::Lc0
+ Cc::Cc0
+ Dc::Dc0
+ Nc::Nc0
+ Cs::Cs0
+ ALPHA::ALPHA0
+ alpha::alpha0
+ prn::prn0
+ P::P0
+ L::L0 }
+
+para:{`0:x}
+tbl:{`0:,/'$/|1(-:1+|/#',/)\$(!#x;x),y}
+tblb:{`0:(x;" ^"@y)}
+tblmb:{`0:(,x),(" -";" ^")@'y}

+ 20 - 0
ngnk-libs/trees/quadtree-example.k

@@ -0,0 +1,20 @@
+\l quadtree.k
+grd:(0 0 0 0 0 0 0 0
+     0 0 0 0 0 0 0 0
+     0 0 0 0 1 1 1 0
+     0 0 0 0 1 1 0 0
+     1 1 1 1 1 1 0 0
+     1 1 1 1 1 1 0 0
+     1 1 1 1 0 0 0 0
+     1 1 1 1 0 0 0 1)
+
+/ quadtree from grid
+:c:0N 4#1_qt[grd;8]
+
+drw@c
+
+/ extract the nodes at indices 1 and 2 and reattach
+drw@ro.(am/[(,0;,1 0 2 0);;].(2 3;ex[c]'1 2))
+
+/ ditto but attach to SE and NW nodes
+drw@ro.(am/[(,0;,0 1 2 0);;].|'(3 1;ex[c]'1 2))

+ 37 - 0
ngnk-libs/trees/quadtree.k

@@ -0,0 +1,37 @@
+/ z-order
+ds:(0 0;0 1;1 0;1 1)
+
+/ parent vector from color vector
+prt:{0,p@(!#p)@&(#p:&~x)#4}
+
+/ paths to root from parent vector
+pth:{(x@)\'!#x}
+
+/ Z-order curve
+zo:{,/r+/:2*r:4/2\!x}
+
+/ render
+drw:{ d:#'ps:pth[prt[c:0,,/x]]  / depth, paths to root
+ dm:2#*s:*/'(-/1|/\d)#\:2       / size of each pixel, dimensions of graph
+ pts:{[c;s;ps;i]|(c@i;+/'*/(s*s:s@p;4!-1+p:-1_'ps@i))}[c;s;ps;&1&c]  / upper left pixels in final graph
+ `0:(,""),".#"dm#(2 0 1@{y+(~y)*x}\@[&*/dm;;:;].pts)@zo@(-1+|/d)(2*)/1       / fill out the rest of the box
+}
+
+/ quad tree from a (2^N)x(2^N) grid: qt[grd;side], e.g. qt[grd;8] for an 8x8 grid
+qt0:{(p;s):z;|((ps@&~w),\:-2!s;@[w;&w;:;1+x@ps@&w:(f-1)=-/y@1 -1_'0 1+\:ps:p+(f:s*s)*!5])}
+qt:{0,*(#*|:){(r;q):z;0 1_'z,'qt0[x;y]@**|z}[grd;+\=':0,grd:(,/x)@<zo[y]]/(();,0,-2!y)}
+
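+/ recursive helper for ex: follow child clusters from the nodes in z, accumulating (parent;color) rows in y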
+ex0:{a:x[1;z]
+  $[#n:,/x[0]?(n0:&'~a)+\:'1+4*z
+    o[x;y,'(1+,/(4*(!#a)+#*|y)+'n0;a);n]
+    y,'(!0;a)]}
+
+/ extract subtree from x at index y
+ex:{ex0[(0,1+&~,/x;x);(0N;!0);,y]}
+
+/ amend z to x at index y
+/ Here z and x are (p;c) parent color pairs
+am:{x,'(y^+/1(1*(#*|x)*4*~^:)\*z;*|z)}
+
+/ reorder color vector based on parent vector
+ro:{y@<x}

+ 15 - 0
ngnk-libs/trees/quadtrees.org

@@ -0,0 +1,15 @@
+* Quad trees
+
+  A few random thoughts on implementing [[https://en.wikipedia.org/wiki/Quadtree][quadtrees]]:  For clarity let’s describe them as representing
+  pictures where each node represents a “pixel” whose size halves at each successive depth.  The
+  content of a pixel can be thought to be a color.  We can pick a color (say 0) to represent pixels
+  which are subdivided, i.e. represent inner nodes of the quad tree.  If we list the colors in BFS
+  preorder then all the children of a given parent node are listed consecutively.  Being a quad tree
+  it comes in clusters of 4.  Put this together with the coloring of parent nodes and a convention
+  for the order of the children (say NW,NE,SW,SE) then all that’s required to understand the full
+  structure is the color vector alone.  Since the root pixel is always subdivided (assuming a
+  connected tree and not a forest) it’s always color zero and always first.  We don’t really need
+  it.  What’s left is clusters of children.  We can just make this an Nx4 matrix.  Converting this
+  to a classic parent vector is trivial.  Raze, find the zeros and repeat them four at a time.  To
+  get the root back, bump up the indices by 1 and prepend a 0.
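+
+  As a rough sketch of that conversion (reusing ~prt~ from quadtree.k and the grid ~grd~ from
+  quadtree-example.k, so the names here are only illustrative):
+
+  : \l quadtree.k
+  : c:0N 4#1_qt[grd;8]   / Nx4 color matrix, root dropped
+  : p:prt[0,,/c]         / re-prepend the root, raze and build the parent vector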
+

+ 85 - 0
ngnk-libs/trees/trees.k

@@ -0,0 +1,85 @@
+// parent vector from depth vector
+p:{r:@[;;|;]\[(!0)@&1+|/x;x;!#x];(!#x)^'r@'x-1}
+
+// parent children lookup (excludes self-parenting)
+cp:{^'[;?x]@=x}
+
+/ join the values of x in z with y.
+cxn:{r+y*r<{(y|z)&z<x}[*|x]\r*|/x=\:r:z}
+
+/ classify the types of child nodes
+/ 0 lone node, 1 leftmost node, 2 rightmost node, 3 middle node
+cls:{1+(-x=!#x)+1+@[&#x;+(-1 1_\:)'.cp[x];+;1 2]}
+
+/ spread out the nodes
+spr:{y@(-x)_,/(!#y),\:x#-1}
+
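+/ shape of a uniformly nested list (e.g. rows then columns for a matrix)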
+shp:-1_#'*:\
+
+// vertical tree rendering
+sh0:{,/'(" ";"┬";"─";"├";"└";"│")x}
+t0:{t:(1+^x?!#x)*+s1:(!1+|/y)=\:y
+ t+:(3+|/'|0>':|\|(!#x)=/:x)*+1_s1,,0
+ sh0 cxn[1 4;5;t]}
+
+// horizontal tree rendering
+sh1:{,/'(" ";"┬";"│";"├";"┐";"┬";"─")x}
+t1:{t:spr[x]@+/(0N;t)*~:\=':t:t@\:!|/#'t:|'(y@)\'(!#y)^y
+  t0:cxn[3 4;6;0^cls[y]@t]
+  (sh1 0^+t0;+t)}
+
+sh2:{,/'(" ";"┬";"│";"┌";"┐";"┬";"─";"│";"┴";"┼")x}
+
+/ center adjust horizontal position
+ctr:{$[^(!x)?z; y
+       1~c:#x@z; @[y;z;:;@[y:o[x]/[y;x@z];*x@z]]
+       @[y;z;:;(-c)!+/@[y:o[x]/[y;x@z];x@z]]]}
+
+/ center labels
+ctl0:{<>((#c)#!2)@&c:,/+(((-':i)-(0,-1_x@i))+(-1_+/1(-|0,|1_)\0,-2!_-0.5+x@i);x@i:&1&x)}
+ctl:{ m:(~&/^:)''x
+  d:,/'(~&//'^:)#'x
+  (d@'ctl0'm*#''x)@\:!#*x}
+
+pad:{p:x#/:" ",0N;p,/:'y,\:'p}
+
+/ mark where node splits to children
+frk:{[i;t;r;y;p]shp[t]#@[0^r[cls[y]];;:;].1(7+0 1 0N 2@-2+3^r[cls[y]]@)\1+i@?y@p}
+
+t2:{t:spr[x]@+/(0N;t)*~:\=':t:t@\:!|/#'t:|'(y@)\'(!#y)^y
+  (h;d):&~^t
+  p:(~^:)#,/t
+  h:ctr[cp@y]/[h@<p;&y=!#y]
+  i:(#*t)/(h;d@<p)
+  r:@[,/0N+t*0;i;:;]
+  (sh2@+cxn[3 4;6;frk[i;t;r;y;p]];+shp[t]#r[!#y])}
+
+// depth first preordering of indices
+dfo:{<,/{y,,/$[^(!x)?y;!0;o[x]'x[y]]}[cp[x]]'&x=!#x}
+
+// Change all the index references in x "under" permutation <y
+redo:{y@x@<y}
+
+// left vector from parent.   assumes dfs preorder
+lfp:{@[!#x;;:;].1{-1_(*x),x}'\.cp[x]}
+
+// right vector from parent.  assumes dfs preorder
+rfp:{@[!#x;;:;].1{1_x,*|x}'\.cp[x]}
+
+// right vector from left.    assumes dfs preorder
+rfl:{c-1+(|!c)^'(|x)?!c:#x}
+
+// left vector from right.    assumes dfs preorder
+lfr:{(!c)^'x?!c:#x}
+
+// first left child.          assumes dfs preorder
+/ *'cp[x]
+flc:!/1(1+)\?:
+
+// first right child.         assumes dfs preorder
+/ (*|)'cp[x]
+frc:{(?x)!(rfp[x]@)/1+?x}
+
+/ simple interface to printing
+shw:{(tr;lb):pad[pd]t2[pd:|/#'x]y
+ `0:,/+(tr;ctl@x@lb)}

+ 31 - 0
ngnk-libs/tutorial/README.org

@@ -0,0 +1,31 @@
+* Simple tutorial runner
+  This tutorial runner can be used to make simple guides through [[https://ngn.codeberg.page/][ngn/k]] functionality.  It takes a
+  script with executable code and comments and prints the code/comments line by line with lines
+  being advanced by hitting return.
+
+  If, instead of return, some code is typed by the user, then that code is executed, offering an
+  opportunity for a more interactive experience.
+
+* API
+  When run from the command line, this simply takes a list of scripts to be run.
+
+* Sample run:
+
+  : $ k tutorial.k ktour.txt
+  :     1 + 1     / basic arithmetic can be done with infix notation
+  :   2
+  : 
+  :     5 * 7
+  :   35
+  : 
+  :     2 - 4
+  :   -2
+  : 9-8    // <--- this is me typing, and then continuing
+  : 1
+  : 
+  :     3 % 2     / division is done with % since / serves other purposes like the beginning of a comment!  :D
+  :   1.5
+
+* Inspiration
+  This was written after trying a few of the excellent tutorials on the [[https://appadvice.com/app/j-programming-language/532587550][J app]] for the [[https://www.jsoftware.com/#/][J language]].
+  

+ 390 - 0
ngnk-libs/tutorial/ktour.txt

@@ -0,0 +1,390 @@
+1 + 1                 / basic arithmetic can be done with infix notation
+5 * 7
+2 - 4
+3 % 2                 / division is done with % since / serves other purposes like the beginning of a comment!  :D
+2 ! 37                / This is 37 modulo 2.  Note that the modulus is on the left.  (explanation to follow .. maybe)
+3 * 4 + 1             / evaluation happens from right to left.  There is no precedence between operations.
+3 * (4 + 1)
+(3 * 4) + 1           / Use parentheses when you want to control the order of evaluation
+1 2 3 + 3 0 2         / rank polymorphism means these operations work for vectors of equal length
+1 2 + 3 0 2           / you get an error if these vectors are not equal length
+1 + 3 0 2             / by scalar extension however, you can add scalars to vectors.
+                      / it's as if the scalar were repeated until the necessary length
+# 9 8 4 5             / length counts the length of a vector
+                      / unlike the previous examples, # takes one argument.  single arguments are always taken to the right
+2 4 , 9 0 4           / concat joins two vectors
+# 2 4 , 9 0 4
+#2 4,9 0 4            / spaces around built-in functions (called verbs in the lingo) are not necessary
+                      / but we'll use them here occasionally for a bit longer
+,1                    / enlist puts its argument in a list
+                      / this shows the meaning of "," is overloaded.  this happens a bunch in k, but often the meanings are related
+# ,1                  / this is a list of length 1
+# 1                   / hmm... scalars have length??  take this on faith for now that this comes in handy
+@ 1                   / @ returns the type of its argument, this is an "int".
+                      / oh, verbs which take one argument are called "monadic" and ones which take two "dyadic"
+@ ,1                  / this is a list of ints.  Capitals indicate lists. (lists, vectors, same thing)
+,3 7 9                / You can enlist a list
+# ,3 7 9              / this is a list of length 1
+* 3 7 9               / this takes the "head" of the list.  i.e. the first element.
+                      / (not related to multiplication but another case of overloading)
+*,3 7 9               / the head of an enlisted list is ... the list.  makes sense.
+(,1 2 3),(,3 5 0)     / the concat of two enlisted lists to form a list of lists
+(1 2 3;3 5 0)         / (Parens around semi-colon separated items form explicit lists)
+#(,1 2 3),(,3 5 0)    / .. of length two
+(,1 2 3), ,3 5 0      / the parentheses on the right are not necessary because of right-to-left evaluation
+(0 1 3;99;6 1)        / explicit lists need not be uniform (neither length nor type)
+@"string"             / strings are lists (capital C) of chars.
+@"s"                  / a single char is also surrounded with double quotes. (this can be a bit confusing at first)
+@,"s"                 / to get a list of a single char, use enlist
+("n";"i";"c";"e")     / double quotes are just syntactic sugar to make lists of chars.
+"o","k"               / Oh yeah!  Concat can also concatenate two scalars
+"s","tring"           / ... or a scalar to a vector.  Note that scalar extension does *not* apply here.
+("o";,"k";4 9 2)      / An even more mixed array
+#'("o";,"k";4 9 2)    / verbs can be modified to perform derived functionality
+                      / here ' (called each) modifies length to perform length on each of the elements of its list argument
+,'4 5 6               / each is called an adverb and can be applied to other verbs
+                      / enlist each element of 4 5 6 to get a list of three one-element lists
+3 9 2,'-1 -4 0        / each can be applied to dyadic verbs and pairs up the elements of both args
+0,'3 4 1              / scalar extension applies to such pairing as well
+0,/:3 4 1             / but there is a more explicit way to indicate that the left argument stays the same
+                      / this adverb is called eachright
+1 0,/:3 4 1           / It's useful when scalar extension doesn't apply
+1 0,\:3 4 1           / There is an eachleft as well.
+                      / Are we having fun yet??
+                      / More verbs!
+!8                    / enumerate numbers from 0 up to (but not including) 8
+#!8
+3#!8                  / "take" the first three numbers of this list
+3_!8                  / "drop" the first three numbers of this list
+-3#!8                 / negative numbers to take from the back of the list
+-3_!8
+9=3                   / just plain equals.  true is 1 and false is 0.
+3=3
+4=!6                  / oho! scalar extension
+0 1 2=!3              / and rank polymorphism!
+0 1 2~!3              / match.  i.e. check that the whole vector is the same
+(0;1 2 3)~(9=3;1+!3)  / even for mixed vectors
+(0;1 2 3)=(9=3;1+!3)  / rank polymorphism applies to ragged shapes as well.  shapes must match (clearly)
+3>4                   / plain old greater than
+3<4
+~3=3                  / monadic ~ is not
+~3>4                  / less than or equal to is just not greater than.
+~3>3
+@7%3                  / oh, yeah.  floats are not ints
+_7%3                  / floor strips off everything after the decimal point
+@_7%3                 / .. and converts to ints
+_7                    / floor of an int is fine
+-_-7%3                / There is no ceiling, but this emoji does the trick
+_"Hey, Dan!"          / lower case for strings.  (similar-ish..)
+%25                   / monadic % is square root
+-35                   / this is just an integer
+-(4 8 5)              / this is the negate verb applied to a list
+-4 8 5                / this is just a list of integers
+-:4 8 5               / a colon forces this to be treated as a monadic function
+                      / Notice that this monadic function applies to each element of the list unlike length (#)
+                      / (colon has many more tricks up its sleeve...)
+|-:4 8 5              / this is that list reversed
+-4|8                  / maximum of -4 and 8 (clearly no relation to the monadic reverse above)
+-4&8                  / minimum of -4 and 8
+("abc";!3)            / the "matrix" made of the rows "abc" and !3.
+                      / lists of lists of equal length can be thought of a matrices
++("abc";!3)           / flip!  The transpose of that matrix.  Again matrices must have rows of equal length.
+("car";37;1 3 4)[0]   / this long and no mention of array indexing??
+("car";37;1 3 4)[0 2] / indices can be lists
+("car";37;1 3 4)[0 0] / lists can have repeats
+1 4 9[2]              / no need for explicit list notation
+1 4 9[(0;2 1;(,1;0))] / more generally, indices can be any shape
+                      / the result matches the shape of the indices and picks out the values at the given index
+1 4 9@(0;2 1;(,1;0))  / brackets are just syntactic sugar for the @ verb (called apply or index)
+1 4 9[1 3]            / outdexing results in a null
+^1 4 9[1 3]           / ^ tests for null
+99^1 4 9[1 3]         / With a value (atom/scalar) to the left fills all nulls with that value
+                      / More adverbs!
++/3 5 9               / slash as an adverb is "left fold" with the accumulator starting as the first element.
+                      / Note here that the verb + is dyadic, but the derived verb +/ is used monadically
+10+/3 5 9             / used dyadically, the left arg becomes the initial value for the accumulator
+                      / slashes used as comments must be preceded with a space
+                      / slashes used as adverbs must *not* be preceded by a space
+|/-4 8 5              / this is "max over".  I.e. the fold of max over the list -4 8 5
+|-4 8 5               / again, this is reverse
+|:-4 8 5              / this forces | to be read as its monadic form
++\3 5 9               / the adverb scan is like fold, but produces all of its intermediate results
+10+\3 5 9             / when using an explicit initial accumulator, the initial value is not part of the results
++':9 4 2              / pairs up each element of its list with the prior element and applies the verb. ': is called each-next.
+                      / the first element is left as is
+1+':9 4 2             / but you can supply a seed value to pair it with.
+1-':9 4 2             / Note that the prior element becomes the right arg of the verb
+                      / There are a couple of funky adverbs which modify non-verbs, which we'll call nouns
+" "\"Yo yo yo"        / Split takes a string on the left and splits its right argument by it.
+                      / Actually here it's technically taking the *character* " ", but that works too
+"--"\"Yo--yo--yo"
+" "/("Hey"; "you")    / Join takes an array of strings as its argument
+10\12345              / Encode represents the given number as digits with the given base
+2\13
+10/9 8 7              / Decode calculates the value of a list of digits in the given base
+2/1 0 1 1 0 1
+24 60 60/1 2 3        / The base for each digit need not be the same
+                      / Note that because of how this works, the top base is irrelevant, but needed to match the number of digits
+-2 60 60/1 2 3
+10 10\976             / For encode, if you supply a list of bases instead of a scalar,
+                      / you always get as many digits as the length of that list
+2 2 2 2 2\5
+0 2 2 2\134           / If the top base is zero, it just returns whatever is left over after decoding the other digits
+2/0 2 2 2\134
+                      / Back to verbs for a bit...
+8#1 2                 / Take can take more items than the list provided in which case it just cycles through
+5#2                   / You can even give take a scalar to simply repeat the value
+(5#2)\5               / Once again, parentheses are needed here because evaluation is from right-to-left
+6 5#1 2               / You can even use a vector with take to generate lists of lists, one row at a time
+                      / In this case it's often called "reshape" but the idea is the same
+2 5_!10               / Drop with a vector becomes "cut".  The right list is split at the given indices and everything before the first index is dropped.
+0 3 7_!10             / To keep the first part, just make 0 the first element of the vector
+&0 0 1 0 1 1          / Given a boolean list, where (&) gives the indices of the 1's
+&1 0 2 3 0 4          / Actually, this is just a special case of "replicate" which repeats each index the given number of times
+                      / Here there is 1 zero, no ones, two twos, three threes, etc.
+?&1 0 2 3 0 4         / Monadic ? (distinct) only keeps the first occurrence of each element in a list
+? 7 8 2 3 7 1 3
+7 8 2 3 7 1 3?1       / Dyadic ? (find) returns the index of the first occurrence of the given element in a list
+7 8 2 3 7 1 3?6       / If not found you get back a null
+
+                      / Now things get a little tricky..
+                      / Some verbs behave differently when given different types of arguments.
+                      / This is kind of true with reshape and cut, but the behaviors weren't too different
+                      / So this next may seem a bit random ...
+15?3                  / Dyadic ? with an integer left argument is "roll".
+                      / It generates that many random numbers from 0 up to (but not including) the right argument
+15?"ace"              / If given a list as its right argument, it randomly picks elements from that list
+-5?5                  / Deal is like roll only it doesn't repeat
+?5                    / As a monadic function with an integer argument, ? returns that many values from a uniform distribution
+                      / Phew!!  There's a lot here.  Grab a hot tea and let some of this sink in a bit.
+
+
+                      / Feeling refreshed?  Let's introduce a few more types...
+(1;"a";3%2)           / so far, we've seen ints, chars and floats
+@'(1;"a";3%2)         / with scalar types `i, `c and `f
+                      / Hmm...  What is `i?
+@`i                   / A new type!  `s represents the symbol type
+@`symbol              / Symbols are basically scalar strings.  In particular they are used to represent types.
+@"symbol"             / Remember vector types are represented with upper case letters.  This is a vector of `c elements.
+#`symbol
+#"symbol"
+@`i`c`f               / Vectors of symbols can be represented by listing them one after another
+                      / This is sometimes called "stranding".  Stranding is only possible for homogeneous types
+@1 2 3 5              / This is why vectors of integers can simply be listed one after another
+@(`i;`c;`f)           / Of course explicit array notation is still possible
+@@'(1;"a";3%2)
+@'(1 2;"ab";3%2 1;`i`c)
+1 0.3 2               / There is some magic here and there.  Here the ints are "promoted" to floats
+1 2 3+4 5 6           / Also, it's worth explicitly noting that stranding binds tighter than any of the verbs
+(1;2;3+4;5;6)         / This is something different.
+1 2,(3+4),5 6         / You could also make this by building it up with the verb concat.
+                      / The parentheses are necessary because of right to left evaluation
+@(1;"a";3%2)          / Before we get too far away it's also worth noting that @ operates on the whole array
+                      / This is a single type known as a "mixed array" and is represented by `A
+@'(1;"a";3%2)         / "each" is needed to operate on each element
+#'(1 2;"abcd";3%2 1;`i`c`s`A`C)  / similar to length
+#(1 2;"abcd";3%2 1;`i`c`s`A`C)
+                      / Over time you get used to which functions do this and which ones "permeate".
+                      / i.e. operate on each element naturally like + or *
+
+                      / Let's introduce one more type: dictionaries
+`a`b`c!3 4 5          / Dictionaries act like association lists and are created with dyadic ! operating on two lists of equal length
+!`a`b`c!3 4 5         / Monadic ! on a dictionary returns the keys of the dictionary
+.`a`b`c!3 4 5         / Monadic . on a dictionary returns the values of the dictionary
+(`a`b`c!3 4 5)[`b]    / Indexing can be used to extract the value associated with a given key
+
+                      / It's probably best to start introducing some programming basics before moving on
+d:`a`b`c!3 4 5        / : is used for assigning to a variable  (We told you colon had more tricks!)
+d                     / See?  (This tutorial is running in a single session so this variable is still visible.)
+d[`b]                 / That looks nicer.  We've extracted the value out of the dictionary d associated with the key `b
+                      / It may be worth noting that variables are not symbols and not strings.  They use no punctuation.
+
+i123:1 2 3            / You can use numbers in the name but the initial character must be a letter
+i123[1]
+i123@1                / Remember that you can use @ to index into an array
+d@`c                  / Or even for looking up in a dictionary
+i123 1                / You can also just list the two next to each other!  (This is *not* stranding!)
+d`c                   / You don't even need a space when it's not ambiguous
+i123:                 / Variables don't go away, but you can (re)assign them to "nothing" if you want to free memory
+i:1 2 3               / Different variable
+i 1                   / Here we need a space because i1 would be a(n undefined!) variable name.
+d:`a`b`a!3 4 5        / Dictionaries can be weird.  You can repeat keys.
+d`a                   / Only the first one is found with lookup
+(!d;.d)               / But both original lists are still in the keys and values
+                      / BTW, there is no "iter".  Keys and values must be extracted separately
++(!d;.d)              / Remember "flip"?  This gets the list of key/value pairs
+(.d)(!d)?`a           / This is basically what dictionary lookups do...
+(!d)?`a               / This extracts the keys and then uses "find" to find the index of the key
+(.d)@(!d)?`a          / This extracts the values and uses the previously found index to index into the array
+(.d)(!d)?`a           / But because there's no ambiguity (really!) you don't need the @ here.  It's not stranding so it's indexing.
+                      / The parentheses are needed because of right to left evaluation.
+
+                      / More programming basics ... and a new type!  lambdas!
+f:{x}                 / Lambdas are formed with curly braces.  This takes a single arg (x) and returns it.
+(f[1];f[`a];f[3%2])   / The lambda can be applied to arguments with brackets similar to array indexing
+(f@1;f@`a;f@3%2)      / Or with @ ..
+(f 1;f`a;f 3%2)       / Or even just juxtaposition ..
+f'(1;`a;3%2)          / lambdas take adverbs just like verbs do
+{x+y}[2;3]            / Brackets are (generally) necessary when supplying more than one arg
+f                     / Also, lambdas don't have to be assigned to a variable the way f was
+f:{x+y};f[1;2];f[3;4] / A semicolon can separate multiple statements on a line.  Only the output of the last is printed
+f:{x+y};f[1;2];       / A trailing semicolon means the last statement was an "empty statement" and so nothing is printed
+{a:x+y;2*a}[1;3]      / Multiple statements can of course be used inside a lambda as well
+                      / Kind of difficult to demonstrate in this format, but the semicolon can be replaced by a newline
+                      / only if the following line begins with at least one space.
+                      / E.g. {a:x+y
+                      /         2*a}
+                      / But this also means that the closing brace must be on the last line with a statement
+                      / In this example {a:x+y
+                      /                    2*a
+                      /                  }
+                      / The last line is an empty statement and so prints nothing.
+{x*y+z}[2;3;4]        / Functions can use up to three implicit arguments which have the names x, y and z
+f:{x+y}[2]            / If fewer args are supplied than required, a "projection" is formed
+@'({x};{x+y}[2])      / Projections are actually a different type, but this doesn't come up that often
+f'3 4 5               / Projections basically "curry" the supplied argument
+{[a;b;c;d]a+b*c-d}    / More than three arguments requires explicit argument declaration with brackets
+{[x;y]x+y}[2;3]       / This can get noisy for simple stuff which is why implicit args exist
+{2*y}[1;3]            / If you use an implicit y then the function takes (at least) two arguments, three if there is a z.
+{2*y}[7]              / This is a projection
+{2*y}[7]@6            / When called, passes the argument to y
+f:+                   / BTW, verbs can be assigned to variables too
+f[2;3]                / But in this form you must use bracket indexing instead of infix notation
+@f                    / Verbs have a different type as well
+@f:@                  / Assignment can also happen inline.  This assigns f and then takes its type. (@ is also a verb!)
+:[123;451]            / Colon is also a (dyadic) verb which returns its second argument.
+                      / .. but is weird because it's hard to parse which of its various forms is meant in the code
+:i:1 2 3              / Here it's used monadically(??) to return the value assigned to i
+123:456               / Here it's used dyadically to return the right argument
+:i:1 2 3              / This form is often useful when debugging code.
+@f:{x}'               / Derived lambdas are yet another type
+f(1;"a";3%2;`b)
+g:+; 5 g/1 2 3        / Derived lambdas actually can be used infix
+g:+/; 5 g 1 2 3       / But only with explicit modifiers.  This doesn't work.
+g:+; 5g/1 2 3         / Actually here the space before the g here is not necessary
+
+                      / What haven't we covered?...
+<"hello world"        / grade! grade returns the indices in an order which sorts the input
+s@<s:"hello world"    / (Remember we can do assignment inline)
+s@>s                  / Actually that was grade up, grade down gives the indices which sort the other direction
+                      / One subtle point is that this is a "stable ordering"
+> 3 17 9 17           / i.e. indices which point to the same value remain in the same relative order
+< 3 17 9 17           / so grade down is not simply the reverse of grade up
+::("a";98)            / :: is the identity function but can be fiddly because of the many uses of :
+(::)"same"            / Often it's safest to simply put it in parentheses
+::                    / It has the unique property that when it's the final value on a line it prints nothing
+                      / Similarly, empty values are replaced with the identity
+iden:;iden "me"       / Here iden is assigned an "empty" value, which simply means the identity function
+"happy";              / This amounts to the nitty gritty behind trailing semicolons inhibiting output
+="hello world"        / With a list = returns a dictionary
+                      / whose keys are the distinct elements and values are indices where that element occurs
+=5                    / With a single integer = returns an identity matrix of that size
+{~x}_3 0 4 0 0 6 7    / _ becomes "weed out" with a (monadic) function as its left argument
+{~x}@3 0 4 0 0 6 7    / If we apply the function to the *whole* right argument we see which elements are removed
+(~:)_3 0 4 0 0 6 7    / We don't actually need a lambda, but a few extra considerations pop up without one
+                      / First we need to surround the verb with parentheses
+                      / to ensure that we're not trying to apply it
+                      / Technically this makes a *noun* out of the *verb* which is why it isn't applied
+                      / Also, we need to make sure that the function is applied monadically, so we use :
+(~:)@3 0 4 0 0 6 7    / Can still test to see which items will be removed
+                      / Here we need @ because its left arg is a noun.
+                      / This is all pretty heady.  Mostly you just get used to the pattern.
+(~#:)@(,"I";"";"am")  / Just to emphasize, the filter function is applied to the *whole* right arg
+(~#:')@(,"I";"";"am") / For length, which doesn't "permeate" like not, we'll need each
+(~#:')_(,"I";"";"am") /   to filter out empty strings
+(~~#:')#(,"I";"";"am")  / With # (replicate) you can specify which items to keep instead of to remove
+(3!)@1+!9             / Only replicate behaves differently when the function returns non-Booleans
+(3!)#1+!9             / In this case it replicates each value according to the corresponding integer
+&(3!)@1+!9            / This is just like where's (&) behavior, except where acts on indices
+l@&(3!)@l:1+!9
+(3!)_1+!9             / Such replication doesn't make sense with weed out
+
+!4 3                  / With a list on the right enumerate becomes "odometer".
+                      / Essentially cyclically count up the bottom row and "tick" each row
+                      / when the row beneath "turns over"
++!4 3                 / Alternatively, the transpose lists all possible "samples" of !:'(4;3)
+4 3#+!4 3             /   or lists all coordinates of a matrix of shape 4 3
+
+                      / Coming round the final turn!!
+$(123;`happy)         / Monadic $ stringifies its argument.  Note this is pervasive.
+                      / i.e. that it converts at the element level and not the array level
+4$$(123;`happy)       / With an integer left argument limits to that length, padding on the right as necessary
+-4$$(123;`happy)      / A negative number pads/chops on the left
+`s$"happy"            / With a symbol on the left converts the right argument to that type when possible
+`s$123                / This errors when it's not possible
+`s$"@123="            / (Note you can make symbols of arbitrary strings by using quotes.)
+`c$104 97 112 112 121
+`i$"happy"
+0+"happy"             / Characters actually naturally convert to ints when used in an int context.  The value is the ASCII code.
+`I$"-123"             / Use capital `I to convert the entire string to an integer rather than individual characters.
+
+                      / I/O!
+1 1:"carpark"         / with a left arg prints bytes to the left arg.  Here 1 is the file descriptor for stdout
+`1:"carpark"          / An empty symbol is equivalent to stdout
+                      / See help for other options including printing to a file
+`0:("happy";"dog")    / 0: can take a list of strings as its right argument
+1:1                   / Without a left argument 1: reads bytes (buffered) from the given file descriptor.  (type something followed by a return)
+                      / Same for 0: for reading lines, but it is terminated by EOF.  Trickier to demonstrate here.
+`k@=5                 / There are a handful of functions which are under symbols
+                      / `k is basically the function used to render K objects in the REPL as strings
+`k'=5                 / It accepts modifiers as well.  Here we render each row of this identity matrix.
+`0:`k'=5              / It's useful in this tutorial to break out of the constraints of single line outputs.
+disp:`0:`k'
+disp@!4 3
+disp@+!4 3
+disp@4 3#+!4 3
+."3+5"                / Monadic . with a string right value is "eval" and evaluates the string as if it were run through the REPL.
+.`k@(1 2;"ab")        / `k is designed to be "round-trippable".
+                      / I.e. evaluating the string generated by applying it to an argument should result in the argument.
+"HI, MOM"[1 4 5]      / We talked about using brackets for indexing.
+"HI, MOM"@1 4 5       / Or alternately using the @ verb.
+@["HI, MOM";1 4 5]    / You can even use bracket indexing with verbs.  Including @ itself!
+                      / But @ is more powerful than your average verb...
+@["HI, MOM";1 4 5;_:] / With a third arg, replaces the values at that index with the (monadic) function applied to the corresponding value
+                      / Note that this returns the entire array modified at the given indices.
+@[!5;1 3 4;-;7 8 2]   / With a fourth arg, does the same but with a dyadic function, taking the final argument as its right arguments
+@[!5;1 3 4;:;7 8 2]   / A common use is with : as assignment
+@[!5;1 1;+;3 7]       / Remember repeat indices are fine.
+@[!5;(1;,1);+;(3;,7)] / As are odd structures as long as the shapes match.
+                      / The function is applied at the leaves when descending the structure.
+disp @[=5;1 2]        / Remembering that matrices are lists of lists, @ simply picks out elements of that top-level list.
+disp m:4 3#!*/4 3     / But sometimes you want to dig deeper than the top-level list
+.[m;2 1]              / Applied like this . becomes "drill", which picks out elements at the given coordinates
+disp.[m;2 1;-:]       / Drill can also take a third argument
+disp.[m;2 1;*;3]      / Or a fourth
+disp.[m;(3 1;0 2)]    / Unlike @, when the second argument is a list, coordinates are generated by taking the Cartesian product
+disp 3 1,/:\:0 2
+.[m;,3 1]             / If this is a singleton list, this is the same as @
+@[m;3 1]
+
+                      / Heavy stuff..  Let's finish off with a few more control structures.
+-2 ! 37               / Before that let's sneak in integer division.  This is like modulo but with a negative modulus
+-2 2 !\: 37           / They're paired so that divmod can be calculated like so.
+                      / On to control structures...
++\1+2*!10             / We've seen fold and scan which roll up values of a list with an accumulator.
+{x+y}\1+2*!10         / As a lambda it would look like this.  Note that it takes two arguments and that the accumulator is the x argument
+{-2!x}\123678         / What if we tried a "scan" with a function which took only one argument?  Like (integer) division by 2?
+                      / Instead of scan we get "converges".
+                      / I.e. the output gets fed back into the input and applied over and over until it stabilizes.
+-2!7
+-2!3
+-2!1
+-2!0
+-2!0
+                      / So the difference between scan and converges is whether the function it's applied to takes two arguments or one
+{-2!x}/123678         / There's also "converge" which doesn't produce intermediate results.  (not as good for demonstration purposes, though.)
+{4!x+1}/1             / Actually converge also stops when the result matches the original input, i.e. it detects complete cycles.
+/ {4!x+1}/-1          / This on the other hand, never terminates, because while it cycles it never cycles back to the beginning.
+10{4!x+1}\-1          / A left argument to converges isn't a seed. (That wouldn't make sense.)  But if it's an integer, it's a repeat count.
+                      / i.e. similar to converges, but instead of detecting when to stop it does exactly that number of iterations.
+{~x=3}{4!x+1}\1       / With a function as a left argument, that function is evaluated with each iteration
+                      /   and the process continues so long as that value is not zero.
+(::){4!x+3}\-1        / The function need not be a lambda.  Here we use the identity function.
+{~x=3}{4!x+1}/1       / Of course there are versions of each of these which do not produce intermediate results
+
+/ There's more we could talk about, but hopefully this is enough to get you started.
+/ For more personal interaction, try one of the communities: [[https://k.miraheze.org/wiki/Online_Communities]]
+/ Happy coding!
+
+
+/ FIN

+ 23 - 0
ngnk-libs/tutorial/tutorial.k

@@ -0,0 +1,23 @@
+\d tut
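+/ evaluate a string and render the result with `k; on error, return the error message preceded by a newline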
+evl:{.[`k@.:;,x;"\n",]}
+hdr0:{(x$($z)/"[]"),y}
+
+hdr:hdr0[6].
+ply:{(*|x)=~#*x}{inp:2(|(&\~^"\n\r\t "?)_)/1:`
+                $[(~#inp)~"\\"=*inp;`0:evl[inp];];(inp;"\\"=*inp)}/
+hdl:{n:z+*x;i:$[z;y;*|x]
+     $[~"\\"~*y;
+       z&"goto"~4#1_y;n:`I$*|" "\y;]
+       (n;i;z+1)}
+nxt:{(2>*|:){(s;i;c):x;`1:$[~c;hdr i
+           "::"~r:evl[*i];"";(2#" "),r,"\n"]
+           hdl[(s;i);*ply@0 0;c]}/(x;y;0)}
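+/ mask of script lines whose first non-blank character is "/" (comment-only lines)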
+cmts:"/"=@'/1(*'&'~^:)\
+btch0:{((x+1)$"\n")/'(&|/1(-1_0,)\~cmts@y)_y}
+btch:btch0[6]
+run:{{(*|x)<#*x}{r:tut.nxt[*|x]@@/x;(*x;$[1=(*r)-*|x;*r;(*|+*x)?*r])}/(x;0)}
+\d .
+
+/$[#x;{tut.nxt[0N]'+1(+\~tut.cmts@)\tut.btch@0:x}'x;];
+
+$[#x;{tut.run@+1(+\~tut.cmts@)\tut.btch@0:x}'x;];

+ 56 - 0
ngnk-libs/xml/README.org

@@ -0,0 +1,56 @@
+* XML parser
+  This is a very raw XML parser for [[https://ngn.codeberg.page/][ngn/k]].  More proof of concept than viable code, it may still be
+  usable for simple extraction.
+
+** API
+   In its current state possibly the only useful entry point is ~xml.parse~ which takes a string and
+   returns a pair of lists.  The first list is the [[http://nsl.com/k/tableaux/trees.k][parent vector]] and the second is list of
+   dictionaries representing the nodes.  (I'll probably revisit this representation at a later date.)
+
+   Nodes are either text nodes which look like this:
+    : `tp`loc`len`cnt!(`txt;1520;29;,(`txt;"Fri, 30 May 2003 11:06:42 GMT"))
+
+   Or tag nodes which look like this:
+    : `tp`nm`loc`len`attrs!(`tag;"/pubDate";1135;10;())
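+
+   A minimal usage sketch (assuming the library is loaded with ~\l xml.k~ and that it defines the
+   ~xml~ namespace; only ~xml.parse~ itself is documented above):
+    : (p;nodes):xml.parse["<html><!-- hi --><body/></html>"]
+    : p          / parent vector
+    : #nodes     / one entry per node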
+
+** Notes
+   As I say, this is very raw.  Currently there is no attempt at substituting entity references such as
+   ~&lt;~, nor for that matter parsing a DTD whatsoever.  Some of this code could probably be used
+   for such a project though.
+
+   This does handle ~CDATA~, though, as well as ~<!-- -->~ style comments, the contents of both of
+   which are taken as raw text and not parsed in any way.
+
+   I hope to add running examples of this shortly.
+
+** Implementation
+   Parsing takes place in several phases:
+   - First comments and CDATA are identified
+   - Then quotes (both single and double) and tag delimiters not inside a comment or CDATA are
+     mutually quoted.  That is to say, single quotes inside double quotes, double quotes inside
+     single quotes, and quotes of either kind /not/ inside a tag delimiter are treated as quoted
+     and hence not "real".
+   - What's left are real quotes and real tag delimiters which can be used to split the text.
+   - Once you've identified real tag delimiters then you know the entire structure of the text.
+     This alone is used to generate the parent vector and itself does not require splitting the
+     text.
+   - Once the nodes have been split up they can be parsed for attributes, etc.
+
+** Walkthrough
+   As an experiment, I've added a script walking through bits of the code which can be used with the
+   [[https://github.com/gitonthescene/ngnk-libs/tree/master/tutorial][tutorial script]].
+
+** Going forward
+   I hope to get back to this but it's been on the shelf for a while.  Some ideas:
+   - Since you know the entire structure before doing any splitting, you could present a [[https://en.wikipedia.org/wiki/Simple_API_for_XML][SAX]]
+     interface which parses as it goes.
+   - Currently the list of nodes mixes text and tag nodes which means it's not a true table.
+     Perhaps these should be separated.  Maybe with a new vector indicating node type/index into the
+     node tables.
+   - This has been on the shelf so long, I've forgotten some of my other ideas.  Adding this for the
+     [[https://en.wikipedia.org/wiki/Rule_of_three_(writing)][rule of three]].
+   - Oh, just remembered!  The tricky part is the mutual quoting because you don't know what's real
+     up front.  You have to iteratively go through and figure out which are real as you go.  The
+     insight is that if you're lucky enough not to have anything mutually quoted then you can simply
+     treat them all as real, thus you only need to seek out where you have potential mutual quoting.
+     If you find such a situation you have to mark what's quoted and then continue.  Currently
+     instead of "continuing" I "start all over".  I suspect this could be made more efficient.

+ 9 - 0
ngnk-libs/xml/edgecase.xml

@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<simple version="2.0">
+  <![CDATA[not a tag: </simple>]]> <![CDATA[not a comment: <!-- simple -->]]>
+  <edge1><![CDATA[also not a comment: <!-- simple -->]]></edge1>
+  <edge2><!-- <![CDATA[still not a comment: <!-- simple -->]]>--></edge2>
+  <self-closing/>
+  <!-- Not closed quote ->" -->
+  <text-quote>"This is also not closed</text-quote>
+</simple>

+ 57 - 0
ngnk-libs/xml/walkthrough.txt

@@ -0,0 +1,57 @@
+:ss:"<html><!-- comment --><body/></html>" / Sample xml text with a variety of tag types
+:ds:&'"<>"=\:ss                            / find the delimiters
+:sp:&'"!?/]-"=\:ss                         / find the indices of "helper" characters for (sp)ecial tag types
+                                           / this produces a list of indices for each helper character
+ss@sp+1                                    / look for the characters following a helper character
+ss@sp-1                                    / look for the characters preceding a helper character
+(sp:&'"!?/]-"=\:ss)+\:/:-1 1               / Both!!
+ds[0]?sp-1                                 / See which characters preceding helper chars are opening delimiters
+~^ds[0]?sp-1                               / We don't need to know which particular delimiter
+                                           / Just that there is one
+~^ds?'(sp:&'"!?/]-"=\:ss)+\:/:-1 1         / Both!
+&''~^ds?'(sp:&'"!?/]-"=\:ss)+\:/:-1 1      / Find the indices into the lists of special characters
+                                           / Find the indices of the actual characters (adjusting back)
+:sp:-1 1+'sp@'/:&''~^ds?'(sp:&'"!?/]-"=\:ss)+\:/:-1 1
+qs:&'"'\""=\:x                             / Find both single and double quotes
+                                           / Now to tackle comments and CDATA
+sp[1]                                      / closing special delimiters
+sp[1;4 3]                                  / find "->" and "]>"
+ss@cl:sp[1;4 3]-2                          / Look back a couple of characters
+"-]"='ss@cl                                / find "-->" and "]]>"
+cl:2+cl@'&'"-]"='ss@cl                     / Keep track of where we found this
+                                           / Now look for openers
+ss@(op:sp[0;0])+\:2+!7                     / Look for the first seven characters after "<!"
+                                           / Check which of these look like the beginning of a comment or CDATA
+("--";"[CDATA["){x~(#x)#y}/:\:ss@(op:sp[0;0])+\:2+!7
+                                           / And find the original indices
+op:op@/:&'("--";"[CDATA["){x~(#x)#y}/:\:ss@(op:sp[0;0])+\:2+!7
+                                           / Now we have candidates for opening and closing comments and CDATA
+                                           / Now we have to clean things up
+                                           / What can happen?
+:ss0:"<!-- <!-- --> -->"                   / Comments don't nest!
+                                           / Actually, this is illegal syntax. "<" must only be used to open a tag
+:ss0:"<!-- &gt;!-- --> -->"                / Nothing wrong with this, though
+                                           / The comment is closed by the first closing comment
+:tst:"(open close) close) (open close)"    / Let's make it look simpler
+:oc:&'"()"=\:tst                           / indices of open and closed characters
+(#'oc)#'1 -1                               / replace open indices with 1's and close indices with -1's
+(,/(#'oc)#'1 -1)@g:<v:,/oc                 / flatten and sort by the (flattened) indices
++\(,/(#'oc)#'1 -1)@g:<v:,/oc               / Where the sum scan dips below 0 is when we have too many close chars
+                                           / We want to throw those out, so let's focus on them
+0&\+\(,/(#'oc)#'1 -1)@g:<v:,/oc            / only spots where we're zero or lower
+0<':0&\+\(,/(#'oc)#'1 -1)@g:<v:,/oc        / lower than even the previous one so another redundant close
+                                           / These are the ones we throw out
+fm:{(v@g)@&~0<':0&\+\(,/(#'x)#'1 -1)@g:<v:,/x}
+                                           / This finds the "first match" for each opening character
+@[&#tst;fm@oc;:;1]                         / For debugging let's mark which indices we've kept
+`0:(tst;" ^"@[&#tst;fm@oc;:;1])            / And line them up with the text
+                                           / Notice that fm returns a list of indices which alternates
+                                           / open and close characters
+ev:#'/|1(2*-2!#:)'\                        / ev ensures you have a list of even length
+                                           / technically shouldn't happen, but it couldn't hurt
+
+                                           / After applying fm to the open and close markers of comments and CDATA sequences,
+                                           / we want to ensure that comments in CDATA and vice versa are just text
+ss:"(easy)([])[()][peasy]([)[)]"           / Let's simply by making using parens a braces
+                                           / The idea is to think of each as quoting the other
+                                           / and we're looking for unquoted characters

+ 40 - 0
ngnk-libs/xml/xml.k

@@ -0,0 +1,40 @@
+\d xml
+MX:0N-1
+ev:#'/|1(2*-2!#:)'\                        / ensure a list of even length (see walkthrough.txt)
+fm:{(v@g)@&~0<':0&\+\(,/(#'x)#'1 -1)@g:<v:,/x}  / first match for each opening character (see walkthrough.txt)
+fr:{m:2!fd:{y'x}.'x@/:i:+&~=#x
+    $[&/,//^k:m?\:0;:0N;]
+    j:*&=/1&/\MX^x[i[;0]]@'k
+    MX^1 0+x[i[j;1]]@0 1+(fd@j)@k@j}
+cl3:{r:fr@(,-1,fm@x[!2]),2_x;$[0N~r;x;x@'(&~</r>\:)'x]}
+cl2:{{r:fr@x;$[0N~r;x;x@'(&~</r>\:)'x]}/x}
+WS:" \t"
+tr:{&/(~`cmt=;(|/^" \n\t"?" ",)')@'+x}
+/tag parse
+tp:{l:@/1(&0<#:')\1_'(0,@/1(&2!{x'y}[-1+*-/x[2 0]]@)\&|/WS=\:s)_s:-1_x[1]
+    (tp;nm;loc;len;attrs):(`tag;l[0];x[0];#x[1];{(x;-1_2_y)}@/'(0,/:(1_l)?\:"=")_'1_l)
+    +(`tp`nm`loc`len`attrs;(tp;nm;loc;len;attrs))}
+pt:{c:0,((#c)#0 1)+c:@/1<:\,//x[3 4]-x[0]
+    (tp;loc;len;cnt):(`txt;x[0]
+                      #x[1]
+                      tr#+(`txt`cmt`cdata@(0,,/((#*)'x[3 4])#'1 2,\:0)@<c
+                      c_x[1]))
+    +(`tp`loc`len`cnt;(tp;loc;len;cnt))}
+ps:{!/+((pt;tp)@>/"<!"=2#x[1])x}
+cc:{cl:2+cl@'&'"-]"='x@cl:y[1;4 3]-2
+    op:op@/:&'("--";"[CDATA["){x~(#x)#y}/:\:x@(op:y[0;0])+\:2+!7
+    cl2@ev@fm'+(op;cl)}
+cl0:{[cms;cds;is]@/1(&2!(+/1(#/|(,0 1),#:)\@/1<:\cms,cds)')\is}
+cl:{[ds;qs;cms;cds;sp];(ds;qs):0 2_cl3/,/cl0[cms;cds]@''(ds;qs)
+    (ds@'&'^(,/'sp)?'ds;,/qs;cl0[cms;cds]''sp)}
+lvl0:{(+\(,/(#'x)#'2 -1 0 -2)@g)-(,/(#'x)#'2 0 0 0)@g:<,/x}
+nds0:{[xml;sep;dd],/'(nds;(.'dl)@'&'(!'dl:(@/1(=c')\)'(sep))=)@\:/:!#nds:+(c;(c:@/1<:\dd)_xml)}
+p1:{ds:&'"<>"=\:x
+    qs:&'"'\""=\:x;
+    sp:-1 1+'sp@'/:&''~^ds?'(sp:&'"!?/]-"=\:x)+\:/:-1 1
+    (ds;qs;sp)}
+p2:{(ds;qs;sp):p1[x];:(ds;qs;sp),cc[x;sp]}
+parse:{(ds;qs;sp;cms;cds):p2[x]
+       (ds;qs;sp):cl[ds;qs;cms;cds;sp]
+       (lvl0[ds,sp[;2]];ps'nds0[x;(qs;cms;cds);,//0 1+/:(ds;sp[;2])])}
+\d .

+ 0 - 30
test.k

@@ -1,30 +0,0 @@
-/ :`i$"A"; // 65
-/ :`i$"Z"; // 90
-/ :`i$"0"; // 48
-/ :`i$"9"; // 57
-/ :`i$":"; // 58
-/ :`i$"$"; // 36
-
-testAtZ: { i:`i$x; $[(i>64) & (i<91);1;0] };
-test0t9: { i:`i$x; $[(i>47) & (i<58);1;0] };
-testcol: { i:`i$x; $[i=58;1;0] };
-testend: { i:`i$x; $[i=36;1;0] };
-
-s0: ((testAtZ; 1);(test0t9; 2));
-s1: ((test0t9; 2);(testcol; 3));
-s2: ((testend; 7); (testcol;5));
-s3: (,(testAtZ; 5));
-s4: ((testAtZ; 5); (test0t9; 6));
-s5: (,(testend; 7));
-s6: (,(testend; 7));
-s7: (({`0:"SUCCESS"};7));
-// 7 is done
-states:: (s0;s1;s2;s3;s4;s5;s6;s7);
-i: 1;
-
-run: {[str] str,:"$"; `0:str; s: 0; :o:s {[s;c] r:{t: x[0][y]; o:$[t;x[1];0]; o}[;c]'states[s]; `0:`k@(s;c;ns); ns:states@(&r); ns}\str;};
-
-// todo: don't be using $
-// prepare for matching pattern and returning the start index and length
-test: "A:A";
-run[test]

+ 1 - 0
test_no_newlinesk

@@ -0,0 +1 @@
+/ :`i$"A"; // 65/ :`i$"Z"; // 90/ :`i$"0"; // 48/ :`i$"9"; // 57/ :`i$":"; // 58/ :`i$"$"; // 36testAtZ: { i:`i$x; $[(i>64) & (i<91);1;0] };test0t9: { i:`i$x; $[(i>47) & (i<58);1;0] };testcol: { i:`i$x; $[i=58;1;0] };testend: { i:`i$x; $[i=36;1;0] };s0: ((testAtZ; 1);(test0t9; 2));s1: ((test0t9; 2);(testcol; 3));s2: ((testend; 7); (testcol;5));s3: (,(testAtZ; 5));s4: ((testAtZ; 5); (test0t9; 6));s5: (,(testend; 7));s6: (,(testend; 7));s7: (({`0:"SUCCESS"};7));// 7 is donestates:: (s0;s1;s2;s3;s4;s5;s6;s7);i: 1;run: {[str] str,:"$"; `0:str; s: 0; o:s {[s;c] r:{t: x[0][y]; `0:`k@(y); o:$[t;x[1];0]; o}[;c]'states[y]; ns:states@(&r); ns}\str; o};// todo: don't be using $// prepare for matching pattern and returning the start index and lengthtest: "A:A";run[test]\n