Compare commits
11 Commits
| SHA1 |
|---|
| 1eea0c2672 |
| 67c7faf449 |
| 82c69ec542 |
| 07e0e587fa |
| 1bd6e1256c |
| 64a7648fbc |
| 9cce861b2e |
| 927ad08057 |
| 2edbc9306d |
| bb71be187f |
| 834395c050 |
.githooks/pre-commit/01-docgen (new executable file, 48 lines)
@@ -0,0 +1,48 @@
#!/bin/bash

orig="${PWD}"

if ! command -v asciidoctor &> /dev/null;
then
	exit 0
fi

set -e

for f in $(find . -type f -iname "README.adoc"); do
	filename=$(basename -- "${f}")
	docsdir=$(dirname -- "${f}")
	nosuffix="${filename%.*}"
	pfx="${docsdir}/${nosuffix}"

	newf="${pfx}.html"
	asciidoctor -a ROOTDIR="${orig}/" -o "${newf}" "${f}"
	echo "Generated ${newf} from ${f}"
	git add "${newf}"
	if command -v asciidoctor-pdf &> /dev/null;
	then
		newf="${pfx}.pdf"

		asciidoctor-pdf -a ROOTDIR="${orig}/" -o "${newf}" "${f}"
	fi
	if command -v pandoc &> /dev/null;
	then
		newf="${pfx}.md"

		set +e
		#asciidoctor -a ROOTDIR="${orig}/" -b docbook -o - "${f}" | pandoc -f docbook -t markdown_strict -o "${newf}"
		#asciidoctor -a ROOTDIR="${orig}/" -b html -o - "${f}" | pandoc -f html -t markdown_strict -o "${newf}"
		asciidoctor -a ROOTDIR="${orig}/" -b html -o - "${f}" | pandoc -f html -t gfm -o "${newf}"
		if [ $? -eq 0 ];
		then
			echo "Generated ${newf} from ${f}"
			git add "${newf}"
		else
			echo "Failed to generate ${newf} from ${f}"
			git rm "${newf}" 2>/dev/null
		fi
		set -e
	fi
	cd ${orig}
done
echo "Regenerated docs"
.gitignore (vendored, 3 lines changed)
@@ -19,12 +19,13 @@
 .idea/

 # https://github.com/github/gitignore/blob/master/Go.gitignore
-# Binaries for programs and plugins
+# Binaries for programs and plugins and other data
 *.exe
 *.exe~
 *.dll
 *.so
 *.dylib
+*.pdf

 # Test binary, built with `go test -c`
 *.test
TODO (new file, 1 line)
@@ -0,0 +1 @@
- validx: validator functions for https://pkg.go.dev/github.com/go-playground/validator/v10
chkplat.sh (new executable file, 19 lines)
@@ -0,0 +1,19 @@
#!/bin/bash

# go tool dist list for all valid GOOS/GOARCH targets.

for tgt in $(go tool dist list);
do
	o="$(echo ${tgt} | cut -f1 -d '/')"
	a="$(echo ${tgt} | cut -f2 -d '/')"
	out="$(env GOOS=${o} GOARCH=${a} go build ./... 2>&1)"
	ret=${?}
	if [ $ret -ne 0 ];
	then
		echo "OS: ${o}"
		echo "ARCH: ${a}"
		echo "${out}"
		echo
		echo
	fi
done
go.mod (25 lines changed)
@@ -3,14 +3,33 @@ module r00t2.io/goutils
 go 1.25

 require (
-	github.com/coreos/go-systemd/v22 v22.6.0
+	github.com/Masterminds/sprig/v3 v3.3.0
+	github.com/coreos/go-systemd/v22 v22.7.0
 	github.com/davecgh/go-spew v1.1.1
 	github.com/google/uuid v1.6.0
+	github.com/shirou/gopsutil/v4 v4.25.12
 	go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
-	golang.org/x/sys v0.39.0
-	r00t2.io/sysutils v1.15.1
+	golang.org/x/sys v0.40.0
+	r00t2.io/sysutils v1.16.2
 )

 require (
 	dario.cat/mergo v1.0.2 // indirect
 	github.com/Masterminds/goutils v1.1.1 // indirect
 	github.com/Masterminds/semver/v3 v3.4.0 // indirect
 	github.com/djherbis/times v1.6.0 // indirect
 	github.com/ebitengine/purego v0.9.1 // indirect
 	github.com/go-ole/go-ole v1.3.0 // indirect
 	github.com/huandu/xstrings v1.5.0 // indirect
 	github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
 	github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
 	github.com/shopspring/decimal v1.4.0 // indirect
 	github.com/spf13/cast v1.10.0 // indirect
 	github.com/tklauser/go-sysconf v0.3.16 // indirect
 	github.com/tklauser/numcpus v0.11.0 // indirect
 	github.com/yusufpapurcu/wmi v1.2.4 // indirect
 	golang.org/x/crypto v0.47.0 // indirect
 	golang.org/x/sync v0.19.0 // indirect
 )
go.sum (70 lines changed)
@@ -1,16 +1,72 @@
|
||||
github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
|
||||
github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=
|
||||
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
|
||||
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
|
||||
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
|
||||
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
|
||||
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
|
||||
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
|
||||
github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
|
||||
github.com/coreos/go-systemd/v22 v22.7.0 h1:LAEzFkke61DFROc7zNLX/WA2i5J8gYqe0rSj9KI28KA=
|
||||
github.com/coreos/go-systemd/v22 v22.7.0/go.mod h1:xNUYtjHu2EDXbsxz1i41wouACIwT7Ybq9o0BQhMwD0w=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
|
||||
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
|
||||
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
|
||||
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
|
||||
github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 h1:PwQumkgq4/acIiZhtifTV5OUqqiP82UAl0h87xj/l9k=
|
||||
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
||||
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
|
||||
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
|
||||
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
|
||||
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/shirou/gopsutil/v4 v4.25.12 h1:e7PvW/0RmJ8p8vPGJH4jvNkOyLmbkXgXW4m6ZPic6CY=
|
||||
github.com/shirou/gopsutil/v4 v4.25.12/go.mod h1:EivAfP5x2EhLp2ovdpKSozecVXn1TmuG7SMzs/Wh4PU=
|
||||
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
|
||||
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
|
||||
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
||||
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA=
|
||||
github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI=
|
||||
github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw=
|
||||
github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y=
|
||||
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
|
||||
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
|
||||
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
r00t2.io/sysutils v1.15.0 h1:FSnREfbXDhBQEO7LMpnRQeKlPshozxk9XHw3YgWRgRg=
|
||||
r00t2.io/sysutils v1.15.0/go.mod h1:28qB0074EIRQ8Sy/ybaA5jC3qA32iW2aYLkMCRhyAFM=
|
||||
r00t2.io/sysutils v1.15.1/go.mod h1:T0iOnaZaSG5NE1hbXTqojRZc0ia/u8TB73lV7zhMz58=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
|
||||
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
r00t2.io/sysutils v1.16.2/go.mod h1:iXK+ALOwIdRKjAJIE5USlkZ669SVDHBNNuYhunsznH8=
|
||||
|
||||
@@ -1,5 +1,4 @@
 //go:build !(windows || plan9 || wasip1 || js || ios)
-// +build !windows,!plan9,!wasip1,!js,!ios

 // I mean maybe it works for plan9 and ios, I don't know.

@@ -1,3 +1,5 @@
+//go:build !(windows || plan9 || wasip1 || js || ios || linux)
+
 package logging

 var (

@@ -1,5 +1,4 @@
 //go:build !(windows || plan9 || wasip1 || js || ios || linux)
-// +build !windows,!plan9,!wasip1,!js,!ios,!linux

 // Linux is excluded because it has its own.
multierr/TODO (new file, 21 lines)
@@ -0,0 +1,21 @@
- add unwrapping
  https://go.dev/blog/go1.13-errors#the-unwrap-method
- add As method, takes a ptr to a slice of []error to return the first matching error type (errors.As) for each?
- add AsAll [][]error ptr param for multiple errors per type?
- add Map, returns map[string][]error, where key is k:
	var sb strings.Builder
	t = reflect.TypeOf(err)
	if t.PkgPath() != "" {
		sb.WriteString(t.PkgPath())
	} else {
		sb.WriteString("<UNKNOWN>")
	}
	sb.WriteString(".")
	if t.Name() != "" {
		sb.WriteString(t.Name())
	} else {
		sb.WriteString("<UNKNOWN>")
	}
	k = sb.String()
- support generics for similar to above?
- this might allow for "error filtering"
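A minimal standalone sketch of the key derivation described in the Map item above; the function name here is illustrative, and the nil guard plus pointer dereference are additions not present in the TODO snippet:

```go
package main

import (
	"errors"
	"fmt"
	"reflect"
	"strings"
)

// errTypeKey builds a "pkgpath.TypeName" key for an error value,
// falling back to "<UNKNOWN>" for either part, as the TODO sketches.
func errTypeKey(err error) string {
	var sb strings.Builder

	if err == nil {
		return "<UNKNOWN>.<UNKNOWN>"
	}

	t := reflect.TypeOf(err)
	if t.Kind() == reflect.Ptr {
		// e.g. *errors.errorString; use the element type for a stable name.
		// (This dereference is an addition to the TODO's sketch.)
		t = t.Elem()
	}
	if t.PkgPath() != "" {
		sb.WriteString(t.PkgPath())
	} else {
		sb.WriteString("<UNKNOWN>")
	}
	sb.WriteString(".")
	if t.Name() != "" {
		sb.WriteString(t.Name())
	} else {
		sb.WriteString("<UNKNOWN>")
	}

	return sb.String()
}

func main() {
	fmt.Println(errTypeKey(errors.New("boom"))) // errors.errorString
}
```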
@@ -13,7 +13,7 @@ import (
 /*
 AddrRfc returns an RFC-friendly string from an IP address ([net/netip.Addr]).

-If addr is an IPv4 address, it will simmply be the string representation (e.g. "203.0.113.1").
+If addr is an IPv4 address, it will simply be the string representation (e.g. "203.0.113.1").

 If addr is an IPv6 address, it will be enclosed in brackets (e.g. "[2001:db8::1]").
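A minimal sketch of the formatting rule that doc comment describes, built only from the stated behavior; this is an illustration, not the package's actual implementation:

```go
package main

import (
	"fmt"
	"net/netip"
)

// rfcAddrString mirrors the documented AddrRfc behavior:
// IPv4 addresses are returned as-is, IPv6 addresses are bracketed.
func rfcAddrString(addr netip.Addr) string {
	if addr.Is4() {
		return addr.String()
	}
	return "[" + addr.String() + "]"
}

func main() {
	fmt.Println(rfcAddrString(netip.MustParseAddr("203.0.113.1"))) // 203.0.113.1
	fmt.Println(rfcAddrString(netip.MustParseAddr("2001:db8::1"))) // [2001:db8::1]
}
```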
remap/doc.go (10 lines changed)
@@ -1,4 +1,12 @@
 /*
-Package remap provides convenience functions around regular expressions, primarily offering maps for named capture groups.
+Package remap provides convenience functions around regular expressions,
+primarily offering maps for named capture groups.
+
+It offers convenience equivalents of the following:
+
+* [regexp.Compile] ([Compile])
+* [regexp.CompilePOSIX] ([CompilePOSIX])
+* [regexp.MustCompile] ([MustCompile])
+* [regexp.MustCompilePOSIX] ([MustCompilePOSIX])
 */
 package remap
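A short usage sketch of the API documented above and defined in remap/funcs.go below; the pattern and input are illustrative (they mirror test case #6 in funcs_remap_test.go), not part of the diff itself:

```go
package main

import (
	"fmt"

	"r00t2.io/goutils/remap"
)

func main() {
	// Compile wraps regexp.Compile and returns a *remap.ReMap.
	r, err := remap.Compile(`(?m)^\s*(?:error|access)_log\s+(?P<logpath>.+);\s*$`)
	if err != nil {
		panic(err)
	}

	conf := "access_log off;\nerror_log /dev/null;\n"

	// MapStringAll keys every named-capture-group match under its group name,
	// consolidating all matches rather than just the first.
	matches := r.MapStringAll(conf, false, false, false)
	fmt.Printf("%#v\n", matches["logpath"]) // []string{"off", "/dev/null"}
}
```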
remap/errs.go (new file, 11 lines)
@@ -0,0 +1,11 @@
package remap

import (
	`errors`
)

var (
	ErrInvalidIdxPair error = errors.New("invalid index pair; [1] must be >= [0]")
	ErrNoStr          error = errors.New("no string to slice/reslice/subslice")
	ErrShortStr       error = errors.New("string too short to slice/reslice/subslice")
)
remap/funcs.go (new file, 170 lines)
@@ -0,0 +1,170 @@
|
||||
package remap
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
)
|
||||
|
||||
/*
|
||||
Compile is a convenience shorthand for:
|
||||
|
||||
var err error
|
||||
var r *remap.ReMap = new(remap.ReMap)
|
||||
|
||||
if r.Regexp, err = regexp.Compile(expr); err != nil {
|
||||
// ...
|
||||
}
|
||||
|
||||
It corresponds to [regexp.Compile].
|
||||
*/
|
||||
func Compile(expr string) (r *ReMap, err error) {
|
||||
|
||||
var p *regexp.Regexp
|
||||
|
||||
if p, err = regexp.Compile(expr); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
r = &ReMap{
|
||||
Regexp: p,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
CompilePOSIX is a convenience shorthand for:
|
||||
|
||||
var err error
|
||||
var r *remap.ReMap = new(remap.ReMap)
|
||||
|
||||
if r.Regexp, err = regexp.CompilePOSIX(expr); err != nil {
|
||||
// ...
|
||||
}
|
||||
|
||||
It corresponds to [regexp.CompilePOSIX].
|
||||
*/
|
||||
func CompilePOSIX(expr string) (r *ReMap, err error) {
|
||||
|
||||
var p *regexp.Regexp
|
||||
|
||||
if p, err = regexp.CompilePOSIX(expr); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
r = &ReMap{
|
||||
Regexp: p,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
MustCompile is a convenience shorthand for:
|
||||
|
||||
var r *remap.ReMap = &remap.ReMap{
|
||||
Regexp: regexp.MustCompile(expr),
|
||||
}
|
||||
|
||||
It corresponds to [regexp.MustCompile].
|
||||
*/
|
||||
func MustCompile(expr string) (r *ReMap) {
|
||||
|
||||
var err error
|
||||
var p *regexp.Regexp
|
||||
|
||||
// We panic ourselves instead of wrapping regexp.MustCompile.
|
||||
// Makes debuggers a little more explicit.
|
||||
if p, err = regexp.Compile(expr); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
r = &ReMap{
|
||||
Regexp: p,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
MustCompilePOSIX is a convenience shorthand for:
|
||||
|
||||
var r *remap.ReMap = &remap.ReMap{
|
||||
Regexp: regexp.MustCompilePOSIX(expr),
|
||||
}
|
||||
|
||||
It corresponds to [regexp.MustCompilePOSIX].
|
||||
*/
|
||||
func MustCompilePOSIX(expr string) (r *ReMap) {
|
||||
|
||||
var err error
|
||||
var p *regexp.Regexp
|
||||
|
||||
// We panic ourselves instead of wrapping regexp.MustCompilePOSIX.
|
||||
// Makes debuggers a little more explicit.
|
||||
if p, err = regexp.CompilePOSIX(expr); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
r = &ReMap{
|
||||
Regexp: p,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
strIdxSlicer takes string s, and returns the substring marked by idxPair,
|
||||
where:
|
||||
|
||||
idxPair = [2]int{
|
||||
<substring START POSITION>,
|
||||
<substring END BOUNDARY>,
|
||||
}
|
||||
|
||||
That is, to get `oo` from `foobar`,
|
||||
|
||||
idxPair = [2]int{1, 3}
|
||||
# NOT:
|
||||
#idxPair = [2]int{1, 2}
|
||||
|
||||
subStr will be empty and matched will be false if:
|
||||
|
||||
* idxPair[0] < 0
|
||||
* idxPair[1] < 0
|
||||
|
||||
It will panic with [ErrShortStr] if:
|
||||
|
||||
* idxPair[0] > len(s)-1
|
||||
* idxPair[1] > len(s)
|
||||
|
||||
It will panic with [ErrInvalidIdxPair] if:
|
||||
|
||||
* idxPair[0] > idxPair[1]
|
||||
|
||||
It will properly handle single-character addresses (i.e. idxPair[0] == idxPair[1]).
|
||||
*/
|
||||
func strIdxSlicer(s string, idxPair [2]int) (subStr string, matched bool) {
|
||||
|
||||
if idxPair[0] < 0 || idxPair[1] < 0 {
|
||||
return
|
||||
}
|
||||
matched = true
|
||||
|
||||
if (idxPair[0] > (len(s) - 1)) ||
|
||||
(idxPair[1] > len(s)) {
|
||||
panic(ErrShortStr)
|
||||
}
|
||||
if idxPair[0] > idxPair[1] {
|
||||
panic(ErrInvalidIdxPair)
|
||||
}
|
||||
|
||||
if idxPair[0] == idxPair[1] {
|
||||
// single character
|
||||
subStr = string(s[idxPair[0]])
|
||||
} else {
|
||||
// multiple characters
|
||||
subStr = s[idxPair[0]:idxPair[1]]
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
@@ -5,9 +5,14 @@ Map returns a map[string][]<match bytes> for regexes with named capture groups m
|
||||
Note that this supports non-unique group names; [regexp.Regexp] allows for patterns with multiple groups
|
||||
using the same group name (though your IDE might complain; I know GoLand does).
|
||||
|
||||
It will panic if the embedded [regexp.Regexp] is nil.
|
||||
|
||||
Each match for each group is in a slice keyed under that group name, with that slice
|
||||
ordered by the indexing done by the regex match itself.
|
||||
|
||||
This operates on only the first found match (like [regexp.Regexp.FindSubmatch]).
|
||||
To operate on *all* matches, use [ReMap.MapAll].
|
||||
|
||||
In summary, the parameters are as follows:
|
||||
|
||||
# inclNoMatch
|
||||
@@ -31,6 +36,7 @@ is provided but b does not match then matches will be:
|
||||
If true (and inclNoMatch is true), instead of a single nil the group's values will be
|
||||
a slice of nil values explicitly matching the number of times the group name is specified
|
||||
in the pattern.
|
||||
May be unpredictable if the same name is used multiple times for different capture groups across multiple patterns.
|
||||
|
||||
For example, if a pattern:
|
||||
|
||||
@@ -87,7 +93,7 @@ In detail, matches and/or its values may be nil or empty under the following con
|
||||
IF inclNoMatch is true
|
||||
IF inclNoMatchStrict is true
|
||||
THEN matches[<group name>] is defined and non-nil, but populated with placeholder nils
|
||||
(matches[<group name>] == [][]byte{nil[, nil...]})
|
||||
(matches[<group name>] == [][]byte{nil[, nil, ...]})
|
||||
ELSE
|
||||
THEN matches[<group name>] is guaranteed defined but may be nil (_, ok = matches[<group name>]; ok == true)
|
||||
ELSE
|
||||
@@ -109,7 +115,7 @@ func (r *ReMap) Map(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (m
|
||||
return
|
||||
}
|
||||
|
||||
names = r.Regexp.SubexpNames()
|
||||
names = r.Regexp.SubexpNames()[:]
|
||||
matchBytes = r.Regexp.FindSubmatch(b)
|
||||
|
||||
if matchBytes == nil {
|
||||
@@ -142,6 +148,9 @@ func (r *ReMap) Map(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (m
|
||||
if inclNoMatch {
|
||||
if len(names) >= 1 {
|
||||
for _, grpNm = range names {
|
||||
if grpNm == "" {
|
||||
continue
|
||||
}
|
||||
matches[grpNm] = nil
|
||||
}
|
||||
}
|
||||
@@ -154,7 +163,7 @@ func (r *ReMap) Map(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (m
|
||||
grpNm = names[mIdx]
|
||||
/*
|
||||
Thankfully, it's actually a build error if a pattern specifies a named
|
||||
capture group with an empty name.
|
||||
capture group with an empty name.
|
||||
So we don't need to worry about accounting for that,
|
||||
and can just skip over grpNm == "" (which is an *unnamed* capture group).
|
||||
*/
|
||||
@@ -190,6 +199,9 @@ func (r *ReMap) Map(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (m
|
||||
// This *technically* should be completely handled above.
|
||||
if inclNoMatch {
|
||||
for _, grpNm = range names {
|
||||
if grpNm == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok = tmpMap[grpNm]; !ok {
|
||||
tmpMap[grpNm] = nil
|
||||
}
|
||||
@@ -204,13 +216,147 @@ func (r *ReMap) Map(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (m
|
||||
}
|
||||
|
||||
/*
|
||||
MapString is exactly like ReMap.Map(), but operates on (and returns) strings instead.
|
||||
(matches will always be nil if s == "".)
|
||||
MapAll behaves exactly like [ReMap.Map] but will "squash"/consolidate *all* found matches, not just the first occurrence,
|
||||
into the group name.
|
||||
|
||||
A small deviation, though; empty strings instead of nils (because duh) will occupy slice placeholders (if `inclNoMatchStrict` is specified).
|
||||
You likely want to use this instead of [ReMap.Map] for multiline patterns.
|
||||
*/
|
||||
func (r *ReMap) MapAll(b []byte, inclNoMatch, inclNoMatchStrict, mustMatch bool) (matches map[string][][]byte) {
|
||||
|
||||
var ok bool
|
||||
var mIdx int
|
||||
var isEmpty bool
|
||||
var match []byte
|
||||
var grpNm string
|
||||
var names []string
|
||||
var mbGrp [][]byte
|
||||
var ptrnNms []string
|
||||
var matchBytes [][][]byte
|
||||
var tmpMap map[string][][]byte = make(map[string][][]byte)
|
||||
|
||||
if b == nil {
|
||||
return
|
||||
}
|
||||
|
||||
names = r.Regexp.SubexpNames()[:]
|
||||
matchBytes = r.Regexp.FindAllSubmatch(b, -1)
|
||||
|
||||
if matchBytes == nil {
|
||||
// b does not match pattern
|
||||
if !mustMatch {
|
||||
matches = make(map[string][][]byte)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if names == nil || len(names) == 0 || len(names) == 1 {
|
||||
/*
|
||||
no named capture groups;
|
||||
technically only the last condition would be the case.
|
||||
*/
|
||||
if inclNoMatch {
|
||||
matches = make(map[string][][]byte)
|
||||
}
|
||||
return
|
||||
}
|
||||
names = names[1:]
|
||||
|
||||
tmpMap = make(map[string][][]byte)
|
||||
|
||||
// From here, it behaves (sort of) like ReMap.Map
|
||||
// except mbGrp is like matchBytes in Map.
|
||||
for _, mbGrp = range matchBytes {
|
||||
|
||||
// Unlike ReMap.Map, we have to do a little additional logic.
|
||||
isEmpty = false
|
||||
ptrnNms = make([]string, 0, len(names))
|
||||
|
||||
if mbGrp == nil {
|
||||
isEmpty = true
|
||||
}
|
||||
|
||||
if !isEmpty {
|
||||
if len(mbGrp) == 0 || len(mbGrp) == 1 {
|
||||
/*
|
||||
no submatches whatsoever.
|
||||
*/
|
||||
isEmpty = true
|
||||
} else {
|
||||
mbGrp = mbGrp[1:]
|
||||
|
||||
for mIdx, match = range mbGrp {
|
||||
if mIdx > len(names) {
|
||||
break
|
||||
}
|
||||
grpNm = names[mIdx]
|
||||
if grpNm == "" {
|
||||
continue
|
||||
}
|
||||
ptrnNms = append(ptrnNms, grpNm)
|
||||
|
||||
if match == nil {
|
||||
// This specific group didn't match, but the pattern as a whole did.
|
||||
if !inclNoMatch {
|
||||
continue
|
||||
}
|
||||
if _, ok = tmpMap[grpNm]; !ok {
|
||||
if !inclNoMatchStrict {
|
||||
tmpMap[grpNm] = nil
|
||||
} else {
|
||||
tmpMap[grpNm] = [][]byte{nil}
|
||||
}
|
||||
} else {
|
||||
if inclNoMatchStrict {
|
||||
tmpMap[grpNm] = append(tmpMap[grpNm], nil)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok = tmpMap[grpNm]; !ok {
|
||||
tmpMap[grpNm] = make([][]byte, 0)
|
||||
}
|
||||
tmpMap[grpNm] = append(tmpMap[grpNm], match)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// I can't recall why I capture this.
|
||||
_ = ptrnNms
|
||||
}
|
||||
|
||||
// *Theoretically* all of these should be populated with at least a nil.
|
||||
if inclNoMatch {
|
||||
for _, grpNm = range names {
|
||||
if grpNm == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok = tmpMap[grpNm]; !ok {
|
||||
tmpMap[grpNm] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(tmpMap) > 0 {
|
||||
matches = tmpMap
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
MapString is exactly like [ReMap.Map], but operates on (and returns) strings instead.
|
||||
(matches will always be nil if s == "".)
|
||||
|
||||
It will panic if the embedded [regexp.Regexp] is nil.
|
||||
|
||||
This operates on only the first found match (like [regexp.Regexp.FindStringSubmatch]).
|
||||
To operate on *all* matches, use [ReMap.MapStringAll].
|
||||
|
||||
A small deviation and caveat, though; empty strings instead of nils (because duh) will occupy slice placeholders (if `inclNoMatchStrict` is specified).
|
||||
This unfortunately *does not provide any indication* if an empty string positively matched the pattern (a "hit") or if it was simply
|
||||
not matched at all (a "miss"). If you need definitive determination between the two conditions, it is instead recommended to either
|
||||
*not* use inclNoMatchStrict or to use ReMap.Map() instead and convert any non-nil values to strings after.
|
||||
*not* use inclNoMatchStrict or to use [ReMap.Map] instead and convert any non-nil values to strings after.
|
||||
|
||||
Particularly:
|
||||
|
||||
@@ -233,8 +379,9 @@ is provided but s does not match then matches will be:
|
||||
# inclNoMatchStrict
|
||||
|
||||
If true (and inclNoMatch is true), instead of a single nil the group's values will be
|
||||
a slice of eempty string values explicitly matching the number of times the group name is specified
|
||||
a slice of empty string values explicitly matching the number of times the group name is specified
|
||||
in the pattern.
|
||||
May be unpredictable if the same name is used multiple times for different capture groups across multiple patterns.
|
||||
|
||||
For example, if a pattern:
|
||||
|
||||
@@ -290,8 +437,8 @@ In detail, matches and/or its values may be nil or empty under the following con
|
||||
IF <group name> does not have a match
|
||||
IF inclNoMatch is true
|
||||
IF inclNoMatchStrict is true
|
||||
THEN matches[<group name>] is defined and non-nil, but populated with placeholder nils
|
||||
(matches[<group name>] == []string{""[, ""...]})
|
||||
THEN matches[<group name>] is defined and non-nil, but populated with placeholder strings
|
||||
(matches[<group name>] == []string{""[, "", ...]})
|
||||
ELSE
|
||||
THEN matches[<group name>] is guaranteed defined but may be nil (_, ok = matches[<group name>]; ok == true)
|
||||
ELSE
|
||||
@@ -304,27 +451,19 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
|
||||
var ok bool
|
||||
var endIdx int
|
||||
var startIdx int
|
||||
var chunkIdx int
|
||||
var grpIdx int
|
||||
var grpNm string
|
||||
var names []string
|
||||
var matchStr string
|
||||
/*
|
||||
A slice of indices or index pairs.
|
||||
For each element `e` in idxChunks,
|
||||
* if `e` is nil, no group match.
|
||||
* if len(e) == 1, only a single character was matched.
|
||||
* otherwise len(e) == 2, the start and end of the match.
|
||||
*/
|
||||
var idxChunks [][]int
|
||||
var si stringIndexer
|
||||
var matchIndices []int
|
||||
var chunkIndices []int // always 2 elements; start pos and end pos
|
||||
var tmpMap map[string][]string = make(map[string][]string)
|
||||
|
||||
/*
|
||||
OK so this is a bit of a deviation.
|
||||
|
||||
It's not as straightforward as above, because there isn't an explicit way
|
||||
like above to determine if a pattern was *matched as an empty string* vs.
|
||||
like above to determine if a pattern was *matched as an empty string* vs.
|
||||
*not matched*.
|
||||
|
||||
So instead do roundabout index-y things.
|
||||
@@ -334,7 +473,8 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
|
||||
return
|
||||
}
|
||||
/*
|
||||
I'm not entirely sure how serious they are about "the slice should not be modified"...
|
||||
I'm not entirely sure how serious they are about
|
||||
"the slice should not be modified"...
|
||||
|
||||
DO NOT sort or dedupe `names`! If the same name for groups is duplicated,
|
||||
it will be duplicated here in proper order and the ordering is tied to
|
||||
@@ -351,7 +491,7 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
|
||||
return
|
||||
}
|
||||
|
||||
if names == nil || len(names) <= 1 {
|
||||
if names == nil || len(names) == 0 || len(names) == 1 {
|
||||
/*
|
||||
No named capture groups;
|
||||
technically only the last condition would be the case,
|
||||
@@ -363,6 +503,7 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
|
||||
}
|
||||
return
|
||||
}
|
||||
names = names[1:]
|
||||
|
||||
if len(matchIndices) == 0 || len(matchIndices) == 1 {
|
||||
/*
|
||||
@@ -378,26 +519,34 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
|
||||
matches = make(map[string][]string)
|
||||
if inclNoMatch {
|
||||
for _, grpNm = range names {
|
||||
if grpNm != "" {
|
||||
matches[grpNm] = nil
|
||||
if grpNm == "" {
|
||||
continue
|
||||
}
|
||||
matches[grpNm] = nil
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
A reslice of `matchIndices` could technically start at 2 (as long as `names` is sliced [1:])
|
||||
because they're in pairs: []int{<start>, <end>, <start>, <end>, ...}
|
||||
and the first pair is the entire pattern match (un-resliced names[0]).
|
||||
Thus the len(matchIndices) == 2*len(names), *even* if you
|
||||
The reslice of `matchIndices` starts at 2 because they're in pairs:
|
||||
|
||||
[]int{<start>, <end>, <start>, <end>, ...}
|
||||
|
||||
and the first pair is the entire pattern match (un-resliced names[0],
|
||||
un-resliced matchIndices[0]).
|
||||
|
||||
Thus the len(matchIndices) == 2*len(names) (*should*, that is), *even* if you reslice.
|
||||
Keep in mind that since the first element of names is removed,
|
||||
the first pair here is skipped.
|
||||
This provides a bit more consistent readability, though.
|
||||
we reslice matchIndices as well.
|
||||
*/
|
||||
idxChunks = make([][]int, len(names))
|
||||
chunkIdx = 0
|
||||
endIdx = 0
|
||||
matchIndices = matchIndices[2:]
|
||||
|
||||
tmpMap = make(map[string][]string)
|
||||
|
||||
// Note that the second index is the *upper boundary*, not a *position in the string*
|
||||
// so these indices are perfectly usable as-is as returned from the regexp methods.
|
||||
// http://golang.org/ref/spec#Slice_expressions
|
||||
for startIdx = 0; endIdx < len(matchIndices); startIdx += 2 {
|
||||
endIdx = startIdx + 2
|
||||
// This technically should never happen.
|
||||
@@ -405,75 +554,253 @@ func (r *ReMap) MapString(s string, inclNoMatch, inclNoMatchStrict, mustMatch bo
|
||||
endIdx = len(matchIndices)
|
||||
}
|
||||
|
||||
chunkIndices = matchIndices[startIdx:endIdx]
|
||||
|
||||
if chunkIndices[0] == -1 || chunkIndices[1] == -1 {
|
||||
// group did not match
|
||||
chunkIndices = nil
|
||||
} else {
|
||||
if chunkIndices[0] == chunkIndices[1] {
|
||||
chunkIndices = []int{chunkIndices[0]}
|
||||
} else {
|
||||
chunkIndices = matchIndices[startIdx:endIdx]
|
||||
}
|
||||
}
|
||||
idxChunks[chunkIdx] = chunkIndices
|
||||
chunkIdx++
|
||||
if grpIdx >= len(names) {
|
||||
break
|
||||
}
|
||||
|
||||
// Now associate with names and pull the string sequence.
|
||||
for chunkIdx, chunkIndices = range idxChunks {
|
||||
grpNm = names[chunkIdx]
|
||||
/*
|
||||
Thankfully, it's actually a build error if a pattern specifies a named
|
||||
capture group with an empty name.
|
||||
So we don't need to worry about accounting for that,
|
||||
and can just skip over grpNm == ""
|
||||
(which is either an *unnamed* capture group
|
||||
OR the first element in `names`, which is always
|
||||
the entire match).
|
||||
*/
|
||||
if grpNm == "" {
|
||||
si = stringIndexer{
|
||||
group: grpIdx,
|
||||
start: matchIndices[startIdx],
|
||||
end: matchIndices[endIdx-1],
|
||||
matched: true,
|
||||
nm: names[grpIdx],
|
||||
grpS: "",
|
||||
s: &matchStr,
|
||||
ptrn: r.Regexp,
|
||||
}
|
||||
grpIdx++
|
||||
|
||||
if si.nm == "" {
|
||||
// unnamed capture group
|
||||
continue
|
||||
}
|
||||
|
||||
if chunkIndices == nil || len(chunkIndices) == 0 {
|
||||
// group did not match
|
||||
// sets si.matched and si.grpS
|
||||
si.idxSlice(&s)
|
||||
|
||||
if !si.matched {
|
||||
if !inclNoMatch {
|
||||
continue
|
||||
}
|
||||
if _, ok = tmpMap[grpNm]; !ok {
|
||||
if _, ok = tmpMap[si.nm]; !ok {
|
||||
if !inclNoMatchStrict {
|
||||
tmpMap[grpNm] = nil
|
||||
tmpMap[si.nm] = nil
|
||||
} else {
|
||||
tmpMap[grpNm] = []string{""}
|
||||
tmpMap[si.nm] = []string{""}
|
||||
}
|
||||
} else {
|
||||
if inclNoMatchStrict {
|
||||
tmpMap[grpNm] = append(tmpMap[grpNm], "")
|
||||
tmpMap[si.nm] = append(tmpMap[si.nm], "")
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch len(chunkIndices) {
|
||||
case 1:
|
||||
// Single character
|
||||
matchStr = string(s[chunkIndices[0]])
|
||||
case 2:
|
||||
// Multiple characters
|
||||
matchStr = s[chunkIndices[0]:chunkIndices[1]]
|
||||
if _, ok = tmpMap[si.nm]; !ok {
|
||||
tmpMap[si.nm] = make([]string, 0)
|
||||
}
|
||||
|
||||
if _, ok = tmpMap[grpNm]; !ok {
|
||||
tmpMap[grpNm] = make([]string, 0)
|
||||
}
|
||||
tmpMap[grpNm] = append(tmpMap[grpNm], matchStr)
|
||||
tmpMap[si.nm] = append(tmpMap[si.nm], si.grpS)
|
||||
}
|
||||
|
||||
// This *technically* should be completely handled above.
|
||||
if inclNoMatch {
|
||||
for _, grpNm = range names {
|
||||
if grpNm == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok = tmpMap[grpNm]; !ok {
|
||||
tmpMap[grpNm] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(tmpMap) > 0 {
|
||||
matches = tmpMap
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
MapStringAll behaves exactly like [ReMap.MapString] but will "squash"/consolidate *all* found matches, not just the first occurrence,
|
||||
into the group name.
|
||||
|
||||
You likely want to use this instead of [ReMap.MapString] for multiline patterns.
|
||||
*/
|
||||
func (r *ReMap) MapStringAll(s string, inclNoMatch, inclNoMatchStrict, mustMatch bool) (matches map[string][]string) {
|
||||
|
||||
var ok bool
|
||||
var endIdx int
|
||||
var startIdx int
|
||||
var grpIdx int
|
||||
var grpNm string
|
||||
var names []string
|
||||
var matchStr string
|
||||
var si stringIndexer
|
||||
var matchIndices []int
|
||||
var allMatchIndices [][]int
|
||||
var tmpMap map[string][]string = make(map[string][]string)
|
||||
|
||||
if s == "" {
|
||||
return
|
||||
}
|
||||
|
||||
names = r.Regexp.SubexpNames()[:]
|
||||
allMatchIndices = r.Regexp.FindAllStringSubmatchIndex(s, -1)
|
||||
|
||||
if allMatchIndices == nil {
|
||||
// s does not match pattern at all.
|
||||
if !mustMatch {
|
||||
matches = make(map[string][]string)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if names == nil || len(names) == 0 || len(names) == 1 {
|
||||
/*
|
||||
No named capture groups;
|
||||
technically only the last condition would be the case,
|
||||
as (regexp.Regexp).SubexpNames() will ALWAYS at the LEAST
|
||||
return a `[]string{""}`.
|
||||
*/
|
||||
if inclNoMatch {
|
||||
matches = make(map[string][]string)
|
||||
}
|
||||
return
|
||||
}
|
||||
names = names[1:]
|
||||
|
||||
if len(allMatchIndices) == 0 {
|
||||
// No matches (and thus submatches) whatsoever.
|
||||
// I think this is actually covered by the `if allMatchIndices == nil { ... }` above,
|
||||
// but this is still here for safety and efficiency - early return on no matches to iterate.
|
||||
matches = make(map[string][]string)
|
||||
if inclNoMatch {
|
||||
for _, grpNm = range names {
|
||||
if grpNm == "" {
|
||||
continue
|
||||
}
|
||||
matches[grpNm] = nil
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
// Do *NOT* trim/reslice allMatchIndices!
|
||||
// The reslicing is done below, *inside* each matchIndices iteration!
|
||||
|
||||
tmpMap = make(map[string][]string)
|
||||
|
||||
// From here, it behaves (sort of) like ReMap.MapString.
|
||||
|
||||
// Build the strictly-paired chunk indexes and populate them.
|
||||
// We are iterating over *match sets*; matchIndices here should be analogous
|
||||
// to matchIndices in ReMap.MapString.
|
||||
for _, matchIndices = range allMatchIndices {
|
||||
|
||||
if matchIndices == nil {
|
||||
// I *think* the exception with the *All* variant here
|
||||
// is the *entire* return (allMatchIndices) is nil if there
|
||||
// aren't any matches; I can't imagine there'd be any feasible
|
||||
// way it'd insert a nil *element* for an index mapping group.
|
||||
// So just continuing here should be fine;
|
||||
// this continue SHOULD be unreachable.
|
||||
continue
|
||||
}
|
||||
|
||||
// Reslice *here*, on the particular match index group.
|
||||
// Grab the matchStr first; it's not currently *used* by anything but may be in the future.
|
||||
matchStr, ok = strIdxSlicer(
|
||||
s,
|
||||
*(*[2]int)(matchIndices[0:2]),
|
||||
)
|
||||
if len(matchIndices) == 0 || len(matchIndices) == 1 {
|
||||
// No *sub*matches (capture groups) in this match, but it still matched the pattern.
|
||||
if inclNoMatch {
|
||||
for _, grpNm = range names {
|
||||
if grpNm == "" {
|
||||
continue
|
||||
}
|
||||
// We don't immediately return, though; we just stage out group names just in case.
|
||||
// That's why we use tmpMap and not matches.
|
||||
if _, ok = tmpMap[grpNm]; !ok {
|
||||
tmpMap[grpNm] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
matchIndices = matchIndices[2:]
|
||||
|
||||
// Reset from previous loop
|
||||
endIdx = 0
|
||||
grpIdx = 0
|
||||
|
||||
for startIdx = 0; endIdx < len(matchIndices); startIdx += 2 {
|
||||
endIdx = startIdx + 2
|
||||
if endIdx > len(matchIndices) {
|
||||
endIdx = len(matchIndices)
|
||||
}
|
||||
|
||||
if grpIdx >= len(names) {
|
||||
break
|
||||
}
|
||||
|
||||
si = stringIndexer{
|
||||
group: grpIdx,
|
||||
start: matchIndices[startIdx],
|
||||
end: matchIndices[endIdx-1],
|
||||
matched: true,
|
||||
nm: names[grpIdx],
|
||||
grpS: "",
|
||||
ptrn: r.Regexp,
|
||||
}
|
||||
grpIdx++
|
||||
// We do not include the entire match string here;
|
||||
// we don't need it for this. Waste of memory.
|
||||
_ = matchStr
|
||||
/*
|
||||
si.s = new(string)
|
||||
*si.s = matchStr
|
||||
*/
|
||||
|
||||
if si.nm == "" {
|
||||
// unnamed capture group
|
||||
continue
|
||||
}
|
||||
|
||||
// sets si.matched and si.grpS
|
||||
si.idxSlice(&s)
|
||||
|
||||
if !si.matched {
|
||||
if !inclNoMatch {
|
||||
continue
|
||||
}
|
||||
if _, ok = tmpMap[si.nm]; !ok {
|
||||
if !inclNoMatchStrict {
|
||||
tmpMap[si.nm] = nil
|
||||
} else {
|
||||
tmpMap[si.nm] = []string{""}
|
||||
}
|
||||
} else {
|
||||
if inclNoMatchStrict {
|
||||
tmpMap[si.nm] = append(tmpMap[si.nm], "")
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok = tmpMap[si.nm]; !ok {
|
||||
tmpMap[si.nm] = make([]string, 0)
|
||||
}
|
||||
tmpMap[si.nm] = append(tmpMap[si.nm], si.grpS)
|
||||
}
|
||||
}
|
||||
|
||||
if inclNoMatch {
|
||||
for _, grpNm = range names {
|
||||
if grpNm == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok = tmpMap[grpNm]; !ok {
|
||||
tmpMap[grpNm] = nil
|
||||
}
|
||||
|
||||
remap/funcs_remap_test.go (new file, 344 lines)
@@ -0,0 +1,344 @@
|
||||
package remap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type (
|
||||
testMatcher struct {
|
||||
Nm string
|
||||
S string
|
||||
M *ReMap
|
||||
All bool
|
||||
Expected map[string][][]byte
|
||||
ExpectedStr map[string][]string
|
||||
ParamInclNoMatch bool
|
||||
ParamInclNoMatchStrict bool
|
||||
ParamInclMustMatch bool
|
||||
}
|
||||
)
|
||||
|
||||
func TestRemap(t *testing.T) {
|
||||
|
||||
var matches map[string][][]byte
|
||||
|
||||
for midx, m := range []testMatcher{
|
||||
// 1
|
||||
testMatcher{
|
||||
Nm: "No matches",
|
||||
S: "this is a test",
|
||||
M: &ReMap{regexp.MustCompile(``)},
|
||||
Expected: nil,
|
||||
},
|
||||
// 2
|
||||
testMatcher{
|
||||
Nm: "Single mid match",
|
||||
S: "This contains a single match in the middle of a string",
|
||||
M: &ReMap{regexp.MustCompile(`\s+(?P<g1>match)\s+`)},
|
||||
Expected: map[string][][]byte{
|
||||
"g1": [][]byte{[]byte("match")},
|
||||
},
|
||||
},
|
||||
// 3
|
||||
testMatcher{
|
||||
Nm: "multi mid match",
|
||||
S: "This contains a single match and another match in the middle of a string",
|
||||
M: &ReMap{regexp.MustCompile(`\s+(?P<g1>match) and another (?P<g1>match)\s+`)},
|
||||
Expected: map[string][][]byte{
|
||||
"g1": [][]byte{
|
||||
[]byte("match"),
|
||||
[]byte("match"),
|
||||
},
|
||||
},
|
||||
},
|
||||
// 4
|
||||
testMatcher{
|
||||
Nm: "line match",
|
||||
S: "This\ncontains a\nsingle\nmatch\non a dedicated line",
|
||||
M: &ReMap{regexp.MustCompile(`(?m)^(?P<g1>match)$`)},
|
||||
Expected: map[string][][]byte{
|
||||
"g1": [][]byte{
|
||||
[]byte("match"),
|
||||
},
|
||||
},
|
||||
},
|
||||
// 5
|
||||
testMatcher{
|
||||
Nm: "multiline match",
|
||||
S: "This\ncontains a\nsingle match and another\nmatch\nin the middle of a string",
|
||||
M: &ReMap{regexp.MustCompile(`\s+(?P<g1>match) and another\s+(?P<g1>match)\s+`)},
|
||||
All: true,
|
||||
Expected: map[string][][]byte{
|
||||
"g1": [][]byte{
|
||||
[]byte("match"),
|
||||
[]byte("match"),
|
||||
},
|
||||
},
|
||||
},
|
||||
// 6
|
||||
// More closely mirrors a real-life configuration
|
||||
testMatcher{
|
||||
Nm: "mixed match",
|
||||
S: " # No longer log hits/reqs/resps to file.\n" +
|
||||
" #access_log /mnt/nginx_logs/vhost/tenant/site/access.log main;\n" +
|
||||
" #error_log /mnt/nginx_logs/vhost/tenant/site/error.log;\n" +
|
||||
" access_log off;\n" +
|
||||
" error_log /dev/null;\n\n" +
|
||||
" ssl_certificate /etc/nginx/tls/crt/tenant.pem;\n" +
|
||||
" ssl_certificate_key /etc/nginx/tls/key/tenant.pem;\n\n",
|
||||
M: &ReMap{regexp.MustCompile(`(?m)^\s*(?:error|access)_log\s+(?P<logpath>.+);\s*$`)},
|
||||
All: true,
|
||||
Expected: map[string][][]byte{
|
||||
"logpath": [][]byte{
|
||||
[]byte("off"),
|
||||
[]byte("/dev/null"),
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
if m.All {
|
||||
matches = m.M.MapAll([]byte(m.S), false, false, false)
|
||||
} else {
|
||||
matches = m.M.Map([]byte(m.S), false, false, false)
|
||||
}
|
||||
t.Logf(
|
||||
"#%d:\n\tsrc:\t'%s'\n\tptrn:\t'%s'\n\tmatch:\t%s\n",
|
||||
midx+1,
|
||||
m.S,
|
||||
m.M.Regexp.String(),
|
||||
testBmapToStrMap(matches),
|
||||
)
|
||||
if !reflect.DeepEqual(matches, m.Expected) {
|
||||
t.Fatalf("Case #%d (\"%s\"): expected '%#v' != received '%#v'", midx+1, m.Nm, m.Expected, matches)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestRemapParams(t *testing.T) {
|
||||
|
||||
var matches map[string][][]byte
|
||||
|
||||
for midx, m := range []testMatcher{
|
||||
testMatcher{
|
||||
Nm: "",
|
||||
S: "this is a test",
|
||||
M: &ReMap{regexp.MustCompile(``)},
|
||||
Expected: nil,
|
||||
ParamInclNoMatch: false,
|
||||
ParamInclNoMatchStrict: false,
|
||||
ParamInclMustMatch: false,
|
||||
},
|
||||
testMatcher{
|
||||
Nm: "",
|
||||
S: "this is a test",
|
||||
M: &ReMap{regexp.MustCompile(``)},
|
||||
Expected: nil,
|
||||
ParamInclNoMatch: false,
|
||||
ParamInclNoMatchStrict: true,
|
||||
ParamInclMustMatch: false,
|
||||
},
|
||||
testMatcher{
|
||||
Nm: "",
|
||||
S: "this is a test",
|
||||
M: &ReMap{regexp.MustCompile(``)},
|
||||
Expected: nil,
|
||||
ParamInclNoMatch: false,
|
||||
ParamInclNoMatchStrict: true,
|
||||
ParamInclMustMatch: true,
|
||||
},
|
||||
testMatcher{
|
||||
Nm: "",
|
||||
S: "this is a test",
|
||||
M: &ReMap{regexp.MustCompile(``)},
|
||||
Expected: nil,
|
||||
ParamInclNoMatch: false,
|
||||
ParamInclNoMatchStrict: false,
|
||||
ParamInclMustMatch: true,
|
||||
},
|
||||
testMatcher{
|
||||
Nm: "",
|
||||
S: "this is a test",
|
||||
M: &ReMap{regexp.MustCompile(``)},
|
||||
Expected: make(map[string][][]byte),
|
||||
ParamInclNoMatch: true,
|
||||
ParamInclNoMatchStrict: false,
|
||||
ParamInclMustMatch: false,
|
||||
},
|
||||
testMatcher{
|
||||
Nm: "",
|
||||
S: "this is a test",
|
||||
M: &ReMap{regexp.MustCompile(``)},
|
||||
Expected: make(map[string][][]byte),
|
||||
ParamInclNoMatch: true,
|
||||
ParamInclNoMatchStrict: true,
|
||||
ParamInclMustMatch: false,
|
||||
},
|
||||
testMatcher{
|
||||
Nm: "",
|
||||
S: "this is a test",
|
||||
M: &ReMap{regexp.MustCompile(``)},
|
||||
Expected: make(map[string][][]byte),
|
||||
ParamInclNoMatch: true,
|
||||
ParamInclNoMatchStrict: true,
|
||||
ParamInclMustMatch: true,
|
||||
},
|
||||
testMatcher{
|
||||
Nm: "",
|
||||
S: "this is a test",
|
||||
M: &ReMap{regexp.MustCompile(``)},
|
||||
Expected: make(map[string][][]byte),
|
||||
ParamInclNoMatch: true,
|
||||
ParamInclNoMatchStrict: false,
|
||||
ParamInclMustMatch: true,
|
||||
},
|
||||
} {
|
||||
if m.All {
|
||||
matches = m.M.MapAll([]byte(m.S), m.ParamInclNoMatch, m.ParamInclNoMatchStrict, m.ParamInclMustMatch)
|
||||
} else {
|
||||
matches = m.M.Map([]byte(m.S), m.ParamInclNoMatch, m.ParamInclNoMatchStrict, m.ParamInclMustMatch)
|
||||
}
|
||||
t.Logf(
|
||||
"%d: %v/%v/%v: %#v\n",
|
||||
midx+1, m.ParamInclNoMatch, m.ParamInclNoMatchStrict, m.ParamInclMustMatch, matches,
|
||||
)
|
||||
if !reflect.DeepEqual(matches, m.Expected) {
|
||||
t.Fatalf("Case #%d (\"%s\"): '%#v' != '%#v'", midx+1, m.Nm, m.ExpectedStr, matches)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestRemapString(t *testing.T) {
|
||||
|
||||
var matches map[string][]string
|
||||
|
||||
for midx, m := range []testMatcher{
|
||||
// 1
|
||||
testMatcher{
|
||||
Nm: "No matches",
|
||||
S: "this is a test",
|
||||
M: &ReMap{regexp.MustCompile(``)},
|
||||
ExpectedStr: nil,
|
||||
},
|
||||
// 2
|
||||
testMatcher{
|
||||
Nm: "Single mid match",
|
||||
S: "This contains a single match in the middle of a string",
|
||||
M: &ReMap{regexp.MustCompile(`\s+(?P<g1>match)\s+`)},
|
||||
ExpectedStr: map[string][]string{
|
||||
"g1": []string{"match"},
|
||||
},
|
||||
},
|
||||
// 3
|
||||
testMatcher{
|
||||
Nm: "multi mid match",
|
||||
S: "This contains a single match and another match in the middle of a string",
|
||||
M: &ReMap{regexp.MustCompile(`\s+(?P<g1>match) and another (?P<g1>match)\s+`)},
|
||||
ExpectedStr: map[string][]string{
|
||||
"g1": []string{
|
||||
"match",
|
||||
"match",
|
||||
},
|
||||
},
|
||||
},
|
||||
// 4
|
||||
testMatcher{
|
||||
Nm: "line match",
|
||||
S: "This\ncontains a\nsingle\nmatch\non a dedicated line",
|
||||
M: &ReMap{regexp.MustCompile(`(?m)^(?P<g1>match)$`)},
|
||||
ExpectedStr: map[string][]string{
|
||||
"g1": []string{
|
||||
"match",
|
||||
},
|
||||
},
|
||||
},
|
||||
// 5
|
||||
testMatcher{
|
||||
Nm: "multiline match",
|
||||
S: "This\ncontains a\nsingle match and another\nmatch\nin the middle of a string",
|
||||
M: &ReMap{regexp.MustCompile(`\s+(?P<g1>match) and another\s+(?P<g1>match)\s+`)},
|
||||
All: true,
|
||||
ExpectedStr: map[string][]string{
|
||||
"g1": []string{
|
||||
"match",
|
||||
"match",
|
||||
},
|
||||
},
|
||||
},
|
||||
// 6
|
||||
// More closely mirrors a real-life configuration
|
||||
testMatcher{
|
||||
Nm: "mixed match",
|
||||
S: " # No longer log hits/reqs/resps to file.\n" +
|
||||
" #access_log /mnt/nginx_logs/vhost/tenant/site/access.log main;\n" +
|
||||
" #error_log /mnt/nginx_logs/vhost/tenant/site/error.log;\n" +
|
||||
" access_log off;\n" +
|
||||
" error_log /dev/null;\n\n" +
|
||||
" ssl_certificate /etc/nginx/tls/crt/tenant.pem;\n" +
|
||||
" ssl_certificate_key /etc/nginx/tls/key/tenant.pem;\n\n",
|
||||
M: &ReMap{regexp.MustCompile(`(?m)^\s*(?:error|access)_log\s+(?P<logpath>.+);\s*$`)},
|
||||
All: true,
|
||||
ExpectedStr: map[string][]string{
|
||||
"logpath": []string{
|
||||
"off",
|
||||
"/dev/null",
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
if m.All {
|
||||
matches = m.M.MapStringAll(m.S, false, false, false)
|
||||
} else {
|
||||
matches = m.M.MapString(m.S, false, false, false)
|
||||
}
|
||||
t.Logf(
|
||||
"#%d:\n\tsrc:\t'%s'\n\tptrn:\t'%s'\n\tmatch:\t%s\n",
|
||||
midx+1,
|
||||
m.S,
|
||||
m.M.Regexp.String(),
|
||||
testSmapToStrMap(matches),
|
||||
)
|
||||
if !reflect.DeepEqual(matches, m.ExpectedStr) {
|
||||
t.Fatalf("Case #%d (\"%s\"): '%#v' != '%#v'", midx+1, m.Nm, m.ExpectedStr, matches)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func testBmapToStrMap(bmap map[string][][]byte) (s string) {
|
||||
|
||||
if bmap == nil {
|
||||
return
|
||||
}
|
||||
|
||||
s = "\n"
|
||||
for k, v := range bmap {
|
||||
s += fmt.Sprintf("\t%s\n", k)
|
||||
for _, i := range v {
|
||||
s += fmt.Sprintf("\t\t%s\n", string(i))
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func testSmapToStrMap(smap map[string][]string) (s string) {
|
||||
|
||||
if smap == nil {
|
||||
return
|
||||
}
|
||||
|
||||
s = "\n"
|
||||
for k, v := range smap {
|
||||
s += fmt.Sprintf("\t%s\n", k)
|
||||
for _, i := range v {
|
||||
s += fmt.Sprintf("\t\t%s\n", i)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
remap/funcs_stringindexer.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package remap

// idx returns []int{s.start, s.end}.
func (s *stringIndexer) idx() (i []int) {
	return []int{s.start, s.end}
}

// idxStrict returns [2]int{s.start, s.end}.
func (s *stringIndexer) idxStrict() (i [2]int) {
	return [2]int{s.start, s.end}
}

/*
idxSlice populates s.grpS using s.start and s.end.

If str is nil, it will use s.s.
If str is nil and s.s is nil, it will panic with [ErrNoStr].

If the pattern does not match (s.start < 0 or s.end < 0),
s.matched will be set to false (otherwise true).
*/
func (s *stringIndexer) idxSlice(str *string) {

	if str == nil {
		if s.s == nil {
			panic(ErrNoStr)
		}
		str = s.s
	}

	s.grpS, s.matched = strIdxSlicer(*str, s.idxStrict())

	return
}
@@ -5,7 +5,7 @@ import (
 )

 type (
-	// ReMap provides some map-related functions around a regexp.Regexp.
+	// ReMap provides some map-related functions around a [regexp.Regexp].
 	ReMap struct {
 		*regexp.Regexp
 	}

@@ -24,4 +24,45 @@
 	}

 */

+	stringIndexer struct {
+		// group is the capture group index for this match.
+		group int
+		// start is the string index (from the original string) where the matched group starts
+		start int
+		// end is the string index where the matched group ends
+		end int
+		/*
+			matched explicitly indicates whether a match was found.
+			(This is normally indeterminate with string regex returns,
+			as e.g. `(?P<mygrp>\s*)`, `(?P<mygrp>(?:somestring)?)`, etc. all can be a *matched* "".)
+
+			If grpS == "" and matched == true, it DID match an empty string.
+			If grpS == "" and matched == false, it DID NOT MATCH the pattern.
+			If grpS != "", matched can be completely disregarded.
+		*/
+		matched bool
+		// nm is the match group name.
+		nm string
+		/*
+			grpS is the actual group-matched *substring*.
+
+			It will ALWAYS be either:
+
+				* the entirety of s
+				* a substring of s
+				* an empty string
+
+			it will never, and cannot be, a SUPERset of s.
+			it may not always be included/populated to save on memory.
+		*/
+		grpS string
+		/*
+			s is the *entire* MATCHED (sub)string.
+			It may not always be populated if not needed to save memory.
+		*/
+		s *string
+		// ptrn is the pattern applied to s.
+		ptrn *regexp.Regexp
+	}
 )
tplx/sprigx/README.adoc (new file, 2077 lines): file diff suppressed because it is too large.
tplx/sprigx/README.html (new file, 3696 lines): file diff suppressed because it is too large.
tplx/sprigx/README.md (new file, 6391 lines): file diff suppressed because it is too large.
tplx/sprigx/TODO (new file, 14 lines)
@@ -0,0 +1,14 @@
- osReadFileBytes
- osReadFileStr
- osReadDir

- `dns*` funcs (net)
- `url*` funcs (net/url)
- `uuid*` funcs (github.com/google/uuid and r00t2.io/goutils/uuidx)

- `http*` funcs:
-- `httpReq`: returns a net/http.Request
-- `http<Method>`: performs <Method> (? seems redundant if exposing httpReq)
-- also have `resty*` funcs?

- i should probably explicitly provide a "safe" set vs. "full" set. can just mod the map func getters to accept a "safeOnly" bool param.
tplx/sprigx/_test.tpl (new file, 101 lines)
@@ -0,0 +1,101 @@
|
||||
################################################################################
|
||||
# RUNTIME #
|
||||
################################################################################
|
||||
|
||||
{{- $rntm := sysRuntime }}
|
||||
|
||||
Arch: {{ sysArch }}
|
||||
CPUs: {{ sysNumCpu }}
|
||||
OS: {{ sysNumCpu }}
|
||||
|
||||
RUNTIME: {{ $rntm }}
|
||||
{{ range $rntmk, $rntmv := $rntm }}
|
||||
{{ $rntmk }}:
|
||||
{{ $rntmv }}
|
||||
{{- end }}
|
||||
{{ dump $rntm }}
|
||||
|
||||
|
||||
################################################################################
|
||||
# PATHS #
|
||||
################################################################################
|
||||
|
||||
###########
|
||||
# Generic #
|
||||
###########
|
||||
|
||||
pathJoin "a" "b" "c"
|
||||
{{ pathJoin "a" "b" "c" }}
|
||||
|
||||
pathJoin "/" "a" "b" "c"
|
||||
{{ pathJoin "/" "a" "b" "c" }}
|
||||
|
||||
pathJoin "/a" "b" "c"
|
||||
{{ pathJoin "/a" "b" "c" }}
|
||||
|
||||
#
|
||||
|
||||
pathPipeJoin "b" "c" "d" "a"
|
||||
{{ pathPipeJoin "b" "c" "d" "a" }}
|
||||
|
||||
"a" | pathPipeJoin "b" "c" "d"
|
||||
{{ "a" | pathPipeJoin "b" "c" "d"}}
|
||||
#
|
||||
|
||||
$base := "/"
|
||||
$myPsjSlice := "a,b,c" | splitList ","
|
||||
pathSliceJoin $myPsjSlice
|
||||
{{- $base := "/" }}
|
||||
{{- $myPsjSlice := "a,b,c" | splitList "," }}
|
||||
{{ pathSliceJoin $myPsjSlice }}
|
||||
|
||||
#
|
||||
|
||||
$base | pathSlicePipeJoin $myPsjSlice
|
||||
{{ $base | pathSlicePipeJoin $myPsjSlice }}
|
||||
|
||||
#
|
||||
|
||||
pathSubJoin $base "a" "b" "c"
|
||||
{{ pathSubJoin $base "a" "b" "c" }}
|
||||
|
||||
|
||||
######################
|
||||
# OS/System/Platform #
|
||||
######################
|
||||
|
||||
osPathJoin "a" "b" "c"
|
||||
{{ osPathJoin "a" "b" "c" }}
|
||||
|
||||
osPathJoin "/" "a" "b" "c"
|
||||
{{ osPathJoin "a" "b" "c" }}
|
||||
|
||||
osPathJoin "/a" "b" "c"
|
||||
{{ osPathJoin "a" "b" "c" }}
|
||||
|
||||
#
|
||||
|
||||
osPathPipeJoin "b" "c" "d" "a"
|
||||
{{ osPathPipeJoin "b" "c" "d" "a" }}
|
||||
|
||||
"a" | osPathPipeJoin "b" "c" "d"
|
||||
{{ "a" | osPathPipeJoin "b" "c" "d" }}
|
||||
|
||||
#
|
||||
|
||||
$osBase := "/"
|
||||
$myOsPsjSlice := "a,b,c" | splitList ","
|
||||
osPathSliceJoin $myOsPsjSlice
|
||||
{{- $osBase := "/" }}
|
||||
{{- $myOsPsjSlice := "a,b,c" | splitList "," }}
|
||||
{{ osPathSliceJoin $myOsPsjSlice }}
|
||||
|
||||
#
|
||||
|
||||
$osBase | osPathSlicePipeJoin $myOsPsjSlice
|
||||
{{ $osBase | osPathSlicePipeJoin $myOsPsjSlice }}
|
||||
|
||||
#
|
||||
|
||||
osPathSubJoin $osBase "a" "b" "c"
|
||||
{{ osPathSubJoin $osBase "a" "b" "c" }}
|
||||
190
tplx/sprigx/consts.go
Normal file
@@ -0,0 +1,190 @@
|
||||
package sprigx
|
||||
|
||||
import (
|
||||
`net`
|
||||
`net/netip`
|
||||
`os`
|
||||
`os/user`
|
||||
`path`
|
||||
`path/filepath`
|
||||
`runtime`
|
||||
`time`
|
||||
|
||||
`github.com/davecgh/go-spew/spew`
|
||||
`github.com/shirou/gopsutil/v4/cpu`
|
||||
`github.com/shirou/gopsutil/v4/disk`
|
||||
`github.com/shirou/gopsutil/v4/host`
|
||||
`github.com/shirou/gopsutil/v4/load`
|
||||
`github.com/shirou/gopsutil/v4/mem`
|
||||
psnet `github.com/shirou/gopsutil/v4/net`
|
||||
`github.com/shirou/gopsutil/v4/process`
|
||||
`github.com/shirou/gopsutil/v4/sensors`
|
||||
`go4.org/netipx`
|
||||
`r00t2.io/goutils/timex`
|
||||
`r00t2.io/sysutils`
|
||||
)
|
||||
|
||||
var (
|
||||
// genericMap holds functions usable/intended for use in either an [html/template.FuncMap] or [text/template.FuncMap].
|
||||
genericMap map[string]any = map[string]any{
|
||||
// Debugging
|
||||
"dump": spew.Sdump,
|
||||
/*
|
||||
"Meta"/Template-Helpers
|
||||
*/
|
||||
"metaIsNil": metaIsNil,
|
||||
/*
|
||||
Networking (net)
|
||||
*/
|
||||
"netCidrMask": net.CIDRMask,
|
||||
"netExtractAddr": netExtractAddr,
|
||||
"netExtractHost": netExtractHost,
|
||||
"netExtractIpnet": netExtractIpnet,
|
||||
"netExtractPort": netExtractPort,
|
||||
"netIfaces": net.Interfaces,
|
||||
"netIp4Mask": netIp4Mask,
|
||||
"netJoinHostPort": net.JoinHostPort,
|
||||
"netParseIP": net.ParseIP,
|
||||
/*
|
||||
Networking (net/netip)
|
||||
*/
|
||||
"netipAddrPort": netip.AddrPortFrom,
|
||||
"netipParseAddr": netip.ParseAddr,
|
||||
"netipParseAddrPort": netip.ParseAddrPort,
|
||||
"netipParsePrefix": netip.ParsePrefix,
|
||||
"netipPrefix": netip.PrefixFrom,
|
||||
/*
|
||||
Networking (go4.org/netipx)
|
||||
*/
|
||||
"netipxAddrIpNet": netipx.AddrIPNet,
|
||||
"netipxCmpPfx": netipx.ComparePrefix,
|
||||
"netipxFromStdAddr": netipxFromStdAddr,
|
||||
"netipxFromIp": netipxFromIp,
|
||||
"netipxFromIpNet": netipxFromIpNet,
|
||||
"netipxParseRange": netipx.ParseIPRange,
|
||||
"netipxPfxAddr": netipx.ParsePrefixOrAddr,
|
||||
"netipxPfxIpNet": netipx.PrefixIPNet,
|
||||
"netipxPfxLast": netipx.PrefixLastIP,
|
||||
"netipxPfxRange": netipx.RangeOfPrefix,
|
||||
"netipxRange": netipx.IPRangeFrom,
|
||||
/*
|
||||
Numbers/Math
|
||||
*/
|
||||
"numFloat32Str": numFloat32Str,
|
||||
"numFloat64": numFloat64,
|
||||
"numFloat64Str": numFloat64Str,
|
||||
"numFloatStr": numFloatStr,
|
||||
/*
|
||||
OS
|
||||
*/
|
||||
"osFQDN": osFQDN,
|
||||
"osGroupById": osGroupById,
|
||||
"osGroupByName": user.LookupGroup,
|
||||
"osHost": osHost,
|
||||
"osHostname": os.Hostname,
|
||||
"osIdState": sysutils.GetIDState,
|
||||
"osUser": user.Current,
|
||||
"osUserById": osUserById,
|
||||
"osUserByName": user.Lookup,
|
||||
/*
|
||||
Paths
|
||||
*/
|
||||
// Paths: Generic
|
||||
"pathJoin": path.Join,
|
||||
"pathPipeJoin": pathPipeJoin,
|
||||
"pathSliceJoin": pathSliceJoin,
|
||||
"pathSlicePipeJoin": pathSlicePipeJoin,
|
||||
"pathSubJoin": pathSubJoin,
|
||||
// Paths: OS/Platform
|
||||
"osPathJoin": filepath.Join,
|
||||
"osPathPipeJoin": osPathPipeJoin,
|
||||
"osPathSep": osPathSep,
|
||||
"osPathSliceJoin": osPathSliceJoin,
|
||||
"osPathSlicePipeJoin": osPathSlicePipeJoin,
|
||||
"osPathSubJoin": osPathSubJoin,
|
||||
/*
|
||||
PSUtil
|
||||
(https://pkg.go.dev/github.com/shirou/gopsutil/v4)
|
||||
*/
|
||||
// .../cpu
|
||||
"psCpuCnts": cpu.Counts,
|
||||
"psCpuInfo": cpu.Info,
|
||||
"psCpuPct": cpu.Percent,
|
||||
"psCpuTimes": cpu.Times,
|
||||
// .../disk
|
||||
"psDiskIoCnts": disk.IOCounters,
|
||||
"psDiskLabel": disk.Label,
|
||||
"psDiskParts": disk.Partitions,
|
||||
"psDiskSerial": disk.SerialNumber,
|
||||
"psDiskUsage": disk.Usage,
|
||||
// .../host
|
||||
"psHostBoot": host.BootTime,
|
||||
"psHostId": host.HostID,
|
||||
"psHostInfo": host.Info,
|
||||
"psHostKernArch": host.KernelArch,
|
||||
"psHostKernVer": host.KernelVersion,
|
||||
"psHostPlatInfo": psHostPlatInfo,
|
||||
"psHostUptime": host.Uptime,
|
||||
"psHostUsers": host.Users,
|
||||
"psHostVirt": psHostVirt,
|
||||
// .../load
|
||||
"psLoadAvg": load.Avg,
|
||||
"psLoadMisc": load.Misc,
|
||||
// .../mem
|
||||
"psMemSwap": mem.SwapMemory,
|
||||
"psMemSwapDevs": mem.SwapDevices,
|
||||
"psMemVMem": mem.VirtualMemory,
|
||||
// .../net
|
||||
"psNetConns": psnet.Connections,
|
||||
"psNetConnsMax": psnet.ConnectionsMax,
|
||||
"psNetConnsPid": psnet.ConnectionsPid,
|
||||
"psNetConnsPidMax": psnet.ConnectionsPidMax,
|
||||
"psNetCTStats": psnet.ConntrackStats,
|
||||
"psNetCTStatList": psnet.NewConntrackStatList,
|
||||
"psNetFilterCnts": psnet.FilterCounters,
|
||||
"psNetIoCnts": psnet.IOCounters,
|
||||
"psNetIoCntsFile": psnet.IOCountersByFile,
|
||||
"psNetIfaces": psnet.Interfaces,
|
||||
"psNetPids": psnet.Pids,
|
||||
"psNetProtoCnt": psnet.ProtoCounters,
|
||||
// .../process
|
||||
"psProcs": process.Processes,
|
||||
"psProcNew": process.NewProcess,
|
||||
"psProcPids": process.Pids,
|
||||
"psProcPidExists": process.PidExists,
|
||||
// .../sensors
|
||||
"psSensorTemps": sensors.SensorsTemperatures,
|
||||
/*
|
||||
Strings
|
||||
*/
|
||||
"extIndent": extIndent, // PR in: https://github.com/Masterminds/sprig/pull/468
|
||||
/*
|
||||
System/Platform
|
||||
*/
|
||||
"sysArch": sysArch,
|
||||
"sysNumCpu": runtime.NumCPU,
|
||||
"sysOsName": sysOsNm,
|
||||
"sysRuntime": sysRuntime,
|
||||
/*
|
||||
Time/Dates/Timestamps
|
||||
*/
|
||||
"tmDate": time.Date,
|
||||
"tmFmt": tmFmt,
|
||||
"tmFloatMicro": timex.F64Microseconds,
|
||||
"tmFloatMilli": timex.F64Milliseconds,
|
||||
"tmFloatNano": timex.F64Nanoseconds,
|
||||
"tmFloat": timex.F64Seconds,
|
||||
"tmNow": time.Now,
|
||||
"tmParseDur8n": time.ParseDuration,
|
||||
"tmParseMonth": tmParseMonth,
|
||||
"tmParseMonthInt": tmParseMonthInt,
|
||||
"tmParseMonthStr": tmParseMonthStr,
|
||||
"tmParseTime": time.Parse,
|
||||
}
|
||||
|
||||
// htmlMap holds functions usable/intended for use in only an [html/template.FuncMap].
|
||||
htmlMap map[string]any = map[string]any{}
|
||||
|
||||
// txtMap holds functions usable/intended for use in only a [text/template.FuncMap].
|
||||
txtMap map[string]any = map[string]any{}
|
||||
)
|
||||
9
tplx/sprigx/consts_darwin.go
Normal file
@@ -0,0 +1,9 @@
|
||||
//go:build darwin
|
||||
|
||||
package sprigx
|
||||
|
||||
var (
|
||||
osGenericMap map[string]any = map[string]any{}
|
||||
osHtmlMap map[string]any = map[string]any{}
|
||||
osTxtMap map[string]any = map[string]any{}
|
||||
)
|
||||
25
tplx/sprigx/consts_linux.go
Normal file
@@ -0,0 +1,25 @@
|
||||
//go:build linux
|
||||
|
||||
package sprigx
|
||||
|
||||
import (
|
||||
`github.com/shirou/gopsutil/v4/mem`
|
||||
psnet `github.com/shirou/gopsutil/v4/net`
|
||||
)
|
||||
|
||||
var (
|
||||
osGenericMap map[string]any = map[string]any{
|
||||
/*
|
||||
PSUtil
|
||||
(https://pkg.go.dev/github.com/shirou/gopsutil/v4)
|
||||
*/
|
||||
// .../mem
|
||||
"psMemExVMem": mem.NewExLinux().VirtualMemory,
|
||||
// .../net
|
||||
"psNetRev": psnet.Reverse,
|
||||
// .../sensors
|
||||
"psSensorExTemp": psSensorExTemp,
|
||||
}
|
||||
osHtmlMap map[string]any = map[string]any{}
|
||||
osTxtMap map[string]any = map[string]any{}
|
||||
)
|
||||
9
tplx/sprigx/consts_unknown.go
Normal file
@@ -0,0 +1,9 @@
|
||||
//go:build !(linux || windows || darwin)
|
||||
|
||||
package sprigx
|
||||
|
||||
var (
|
||||
osGenericMap map[string]any = map[string]any{}
|
||||
osHtmlMap map[string]any = map[string]any{}
|
||||
osTxtMap map[string]any = map[string]any{}
|
||||
)
|
||||
24
tplx/sprigx/consts_windows.go
Normal file
@@ -0,0 +1,24 @@
|
||||
//go:build windows
|
||||
|
||||
package sprigx
|
||||
|
||||
import (
|
||||
`github.com/shirou/gopsutil/v4/mem`
|
||||
`github.com/shirou/gopsutil/v4/winservices`
|
||||
)
|
||||
|
||||
var (
|
||||
osGenericMap map[string]any = map[string]any{
|
||||
/*
|
||||
PSUtil
|
||||
(https://pkg.go.dev/github.com/shirou/gopsutil/v4)
|
||||
*/
|
||||
// .../mem
|
||||
"psMemExVMem": mem.NewExWindows().VirtualMemory,
|
||||
// .../winservices
|
||||
"psWinsvcList": winservices.ListServices,
|
||||
"psWinsvcNew": winservices.NewService,
|
||||
}
|
||||
osHtmlMap map[string]any = map[string]any{}
|
||||
osTxtMap map[string]any = map[string]any{}
|
||||
)
|
||||
16
tplx/sprigx/doc.go
Normal file
@@ -0,0 +1,16 @@
|
||||
/*
|
||||
Package sprigx aims to provide additional functions that the author believes are missing from [sprig] ([Go docs]).
|
||||
|
||||
It's a decent enough "basics" library, but I frequently find it falls short once you start needing domain-specific data.
|
||||
|
||||
These may get merged into sprig, they may not. It all depends on how responsive they are to PRs.
|
||||
Given that they only update it every 6 months or so, however...
|
||||
|
||||
See the [full documentation] on the [repo].
|
||||
|
||||
[sprig]: https://masterminds.github.io/sprig/
|
||||
[Go docs]: https://pkg.go.dev/github.com/Masterminds/sprig/v3
|
||||
[full documentation]: https://git.r00t2.io/r00t2/go_goutils/src/branch/master/tplx/sprigx/README.adoc
|
||||
[repo]: https://git.r00t2.io/r00t2/go_goutils
|
||||
*/
|
||||
package sprigx
|
||||
77
tplx/sprigx/docinfo.html
Normal file
@@ -0,0 +1,77 @@
|
||||
<!-- https://stackoverflow.com/a/34481639 -->
|
||||
<!-- Generate a nice TOC -->
|
||||
<script src="https://code.jquery.com/jquery-1.11.3.min.js"></script>
|
||||
<script src="https://code.jquery.com/ui/1.11.4/jquery-ui.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery.tocify/1.9.0/javascripts/jquery.tocify.min.js"></script>
|
||||
<!-- We do not need the tocify CSS because the asciidoc CSS already provides most of what we need -->
|
||||
|
||||
<style>
|
||||
.tocify-header {
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
.tocify-subheader {
|
||||
font-style: normal;
|
||||
font-size: 90%;
|
||||
}
|
||||
|
||||
.tocify ul {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
.tocify-focus {
|
||||
color: #7a2518;
|
||||
background-color: rgba(0, 0, 0, 0.1);
|
||||
}
|
||||
|
||||
.tocify-focus > a {
|
||||
color: #7a2518;
|
||||
}
|
||||
</style>
|
||||
|
||||
<script type="text/javascript">
|
||||
$(function () {
|
||||
// Add a new container for the tocify toc into the existing toc so we can re-use its
|
||||
// styling
|
||||
$("#toc").append("<div id='generated-toc'></div>");
|
||||
$("#generated-toc").tocify({
|
||||
extendPage: true,
|
||||
context: "#content",
|
||||
highlightOnScroll: true,
|
||||
hideEffect: "slideUp",
|
||||
// Use the IDs that asciidoc already provides so that TOC links and intra-document
|
||||
// links are the same. Anything else might confuse users when they create bookmarks.
|
||||
hashGenerator: function(text, element) {
|
||||
return $(element).attr("id");
|
||||
},
|
||||
// Smooth scrolling doesn't work properly if we use the asciidoc IDs
|
||||
smoothScroll: false,
|
||||
// Set to 'none' to use the tocify classes
|
||||
theme: "none",
|
||||
// Handle book (may contain h1) and article (only h2 deeper)
|
||||
selectors: $( "#content" ).has( "h1" ).size() > 0 ? "h1,h2,h3,h4,h5" : "h2,h3,h4,h5",
|
||||
ignoreSelector: ".discrete"
|
||||
});
|
||||
|
||||
// Switch between static asciidoc toc and dynamic tocify toc based on browser size
|
||||
// This is set to match the media selectors in the asciidoc CSS
|
||||
// Without this, we keep the dynamic toc even if it is moved from the side to preamble
|
||||
// position which will cause odd scrolling behavior
|
||||
var handleTocOnResize = function() {
|
||||
if ($(document).width() < 768) {
|
||||
$("#generated-toc").hide();
|
||||
$(".sectlevel0").show();
|
||||
$(".sectlevel1").show();
|
||||
}
|
||||
else {
|
||||
$("#generated-toc").show();
|
||||
$(".sectlevel0").hide();
|
||||
$(".sectlevel1").hide();
|
||||
}
|
||||
}
|
||||
|
||||
$(window).resize(handleTocOnResize);
|
||||
handleTocOnResize();
|
||||
});
|
||||
</script>
|
||||
|
||||
16
tplx/sprigx/errs.go
Normal file
@@ -0,0 +1,16 @@
|
||||
package sprigx
|
||||
|
||||
import (
|
||||
`errors`
|
||||
)
|
||||
|
||||
var (
|
||||
ErrBadAddr error = errors.New("invalid/bad address")
|
||||
ErrBadAddrPort error = errors.New("invalid/bad address/port")
|
||||
ErrBadMonth error = errors.New("could not determine/parse month")
|
||||
ErrBadNet error = errors.New("invalid/bad network")
|
||||
ErrOverflow error = errors.New("integer/buffer overflow")
|
||||
ErrBadType error = errors.New("an invalid/unknown type was passed")
|
||||
ErrNilVal error = errors.New("a nil value was passed")
|
||||
ErrUnderflow error = errors.New("integer/buffer underflow")
|
||||
)
|
||||
356
tplx/sprigx/funcs.go
Normal file
@@ -0,0 +1,356 @@
|
||||
package sprigx
|
||||
|
||||
import (
|
||||
`errors`
|
||||
htpl "html/template"
|
||||
`math`
|
||||
`reflect`
|
||||
`strconv`
|
||||
ttpl "text/template"
|
||||
|
||||
`github.com/Masterminds/sprig/v3`
|
||||
)
|
||||
|
||||
/*
|
||||
Many of these functions are modeled after sprig's.
|
||||
*/
|
||||
|
||||
/*
|
||||
CombinedFuncMap returns a generic function map (like [FuncMap]) combined with
|
||||
[github.com/Masterminds/sprig/v3.GenericFuncMap].
|
||||
|
||||
If preferSprigX is true, SprigX function names will override Sprig
|
||||
functions with the same name.
|
||||
If false, Sprig functions will override conflicting SprigX functions
|
||||
with the same name.
|
||||
|
||||
You probably want [CombinedHtmlFuncMap] or [CombinedTxtFuncMap] instead,
|
||||
as they wrap this with the appropriate type.
|
||||
*/
|
||||
func CombinedFuncMap(preferSprigX bool) (fmap map[string]any) {
|
||||
|
||||
var fn any
|
||||
var fnNm string
|
||||
var sprigMap map[string]interface{} = sprig.GenericFuncMap()
|
||||
var sprigxMap map[string]any = FuncMap()
|
||||
|
||||
if preferSprigX {
|
||||
fmap = sprigMap
|
||||
for fnNm, fn = range sprigxMap {
|
||||
fmap[fnNm] = fn
|
||||
}
|
||||
} else {
|
||||
fmap = sprigxMap
|
||||
for fnNm, fn = range sprigMap {
|
||||
fmap[fnNm] = fn
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
CombinedHtmlFuncMap returns an [htpl.FuncMap] (like [HtmlFuncMap]) combined with
|
||||
[github.com/Masterminds/sprig/v3.HtmlFuncMap].
|
||||
|
||||
If preferSprigX is true, SprigX function names will override Sprig
|
||||
functions with the same name.
|
||||
If false, Sprig functions will override conflicting SprigX functions
|
||||
with the same name.
|
||||
*/
|
||||
func CombinedHtmlFuncMap(preferSprigX bool) (fmap htpl.FuncMap) {
|
||||
|
||||
var fn any
|
||||
var fnNm string
|
||||
var sprigMap htpl.FuncMap = sprig.HtmlFuncMap()
|
||||
var sprigxMap htpl.FuncMap = HtmlFuncMap()
|
||||
|
||||
if preferSprigX {
|
||||
fmap = sprigMap
|
||||
for fnNm, fn = range sprigxMap {
|
||||
fmap[fnNm] = fn
|
||||
}
|
||||
} else {
|
||||
fmap = sprigxMap
|
||||
for fnNm, fn = range sprigMap {
|
||||
fmap[fnNm] = fn
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
CombinedTxtFuncMap returns a [ttpl.FuncMap] (like [TxtFuncMap]) combined with
|
||||
[github.com/Masterminds/sprig/v3.TxtFuncMap].
|
||||
|
||||
If preferSprigX is true, SprigX function names will override Sprig
|
||||
functions with the same name.
|
||||
If false, Sprig functions will override conflicting SprigX functions
|
||||
with the same name.
|
||||
*/
|
||||
func CombinedTxtFuncMap(preferSprigX bool) (fmap ttpl.FuncMap) {
|
||||
|
||||
var fn any
|
||||
var fnNm string
|
||||
var sprigMap ttpl.FuncMap = sprig.TxtFuncMap()
|
||||
var sprigxMap ttpl.FuncMap = TxtFuncMap()
|
||||
|
||||
if preferSprigX {
|
||||
fmap = sprigMap
|
||||
for fnNm, fn = range sprigxMap {
|
||||
fmap[fnNm] = fn
|
||||
}
|
||||
} else {
|
||||
fmap = sprigxMap
|
||||
for fnNm, fn = range sprigMap {
|
||||
fmap[fnNm] = fn
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
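As a usage illustration (a minimal, hedged sketch; the import path is assumed from the module layout and the template body is made up for the example), one might wire the combined map into text/template like so, with preferSprigX deciding who wins on a name collision:

package main

import (
    "os"
    "text/template"

    "r00t2.io/goutils/tplx/sprigx"
)

func main() {

    // preferSprigX == true: on a name collision, the sprigx function wins.
    tpl := template.Must(
        template.New("demo").
            Funcs(sprigx.CombinedTxtFuncMap(true)).
            Parse(`OS: {{ sysOsName }} ({{ sysArch }}), CPUs: {{ sysNumCpu }}, greeting: {{ "hello" | upper }}`),
    )

    // No data object is needed; these particular functions take no pipeline input.
    if err := tpl.Execute(os.Stdout, nil); err != nil {
        panic(err)
    }
}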
|
||||
|
||||
/*
|
||||
FuncMap returns a generic function map.
|
||||
|
||||
You probably want [HtmlFuncMap] or [TxtFuncMap] instead,
|
||||
as they wrap this with the appropriate type.
|
||||
*/
|
||||
func FuncMap() (fmap map[string]any) {
|
||||
|
||||
var fn string
|
||||
var f any
|
||||
|
||||
fmap = make(map[string]any, len(genericMap))
|
||||
|
||||
for fn, f = range genericMap {
|
||||
fmap[fn] = f
|
||||
}
|
||||
if osGenericMap != nil && len(osGenericMap) > 0 {
|
||||
for fn, f = range osGenericMap {
|
||||
fmap[fn] = f
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// HtmlFuncMap returns an [html/template.FuncMap].
|
||||
func HtmlFuncMap() (fmap htpl.FuncMap) {
|
||||
|
||||
var fn string
|
||||
var f any
|
||||
|
||||
fmap = htpl.FuncMap(FuncMap())
|
||||
|
||||
if htmlMap != nil && len(htmlMap) > 0 {
|
||||
for fn, f = range htmlMap {
|
||||
fmap[fn] = f
|
||||
}
|
||||
}
|
||||
|
||||
if osHtmlMap != nil && len(osHtmlMap) > 0 {
|
||||
for fn, f = range osHtmlMap {
|
||||
fmap[fn] = f
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Nop explicitly performs a NO-OP and returns an empty string, allowing one to override "unsafe" functions.
|
||||
func Nop(obj ...any) (s string) {
|
||||
return
|
||||
}
|
||||
|
||||
// TxtFuncMap returns a [text/template.FuncMap].
|
||||
func TxtFuncMap() (fmap ttpl.FuncMap) {
|
||||
|
||||
var fn string
|
||||
var f any
|
||||
|
||||
fmap = ttpl.FuncMap(FuncMap())
|
||||
|
||||
if txtMap != nil && len(txtMap) > 0 {
|
||||
for fn, f = range txtMap {
|
||||
fmap[fn] = f
|
||||
}
|
||||
}
|
||||
|
||||
if osTxtMap != nil && len(osTxtMap) > 0 {
|
||||
for fn, f = range osTxtMap {
|
||||
fmap[fn] = f
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
toFloat64 uses reflection to resolve any string or numeric type (even custom types) to a float64.
|
||||
|
||||
It wraps toString for string types but will fall back to checking numeric types.
|
||||
|
||||
If err != nil, then NaN (if true) indicates that:
|
||||
|
||||
* val is a string (or pointer to a string), but
|
||||
* is not a valid numeric string
|
||||
|
||||
(you can check this from the caller as well by calling `errors.Is(err, strconv.ErrSyntax)`).
|
||||
err will always be non-nil if NaN is true.
|
||||
|
||||
err will be ErrNilVal if val is nil.
|
||||
*/
|
||||
func toFloat64(val any) (f float64, NaN bool, err error) {
|
||||
|
||||
var s string
|
||||
var k reflect.Kind
|
||||
var rv reflect.Value
|
||||
|
||||
// toString will return ErrNilVal if nil.
|
||||
if s, err = toString(val); err != nil {
|
||||
if errors.Is(err, ErrBadType) {
|
||||
// This is OK, it's (hopefully) a number type.
|
||||
err = nil
|
||||
} else {
|
||||
// *probably* ErrNilVal.
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// We can go ahead and parse this directly since it's already deref'd if a ptr.
|
||||
if f, err = strconv.ParseFloat(s, 64); err != nil {
|
||||
NaN = errors.Is(err, strconv.ErrSyntax)
|
||||
}
|
||||
// We can return regardless here; it's up to the caller to check NaN/err.
|
||||
// If they're false/nil, f is parsed already!
|
||||
return
|
||||
}
|
||||
|
||||
rv = reflect.ValueOf(val)
|
||||
k = rv.Kind()
|
||||
|
||||
if k == reflect.Ptr {
|
||||
if rv.IsNil() {
|
||||
// *technically* this should be handled above, but best be safe.
|
||||
err = ErrNilVal
|
||||
return
|
||||
}
|
||||
rv = rv.Elem()
|
||||
k = rv.Kind()
|
||||
}
|
||||
|
||||
switch k {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
f = float64(rv.Int())
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
f = float64(rv.Uint())
|
||||
case reflect.Float32, reflect.Float64:
|
||||
f = rv.Float()
|
||||
default:
|
||||
// No need to check for string types since we do that near the beginning.
|
||||
err = ErrBadType
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
toInt wraps toFloat64, rounds it to the nearest integer,
|
||||
and converts to an int.
|
||||
|
||||
NaN, err have the same meaning as in toFloat64.
|
||||
|
||||
This function will panic if the f returned by toFloat64(val) exceeds
math.MaxInt on your platform.
|
||||
*/
|
||||
func toInt(val any) (i int, NaN bool, err error) {
|
||||
|
||||
var f float64
|
||||
|
||||
if f, NaN, err = toFloat64(val); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
i = int(math.Round(f))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
toPosFloat64 wraps toFloat64 and ensures that it is a positive float64.
|
||||
|
||||
NaN, err have the same meaning as in toFloat64.
|
||||
*/
|
||||
func toPosFloat64(val any) (f float64, NaN bool, err error) {
|
||||
|
||||
if f, NaN, err = toFloat64(val); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
f = math.Abs(f)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
toPosInt wraps toPosFloat64, rounds it to the nearest integer,
|
||||
and converts to an int.
|
||||
|
||||
NaN, err have the same meaning as in toPosFloat64 (and thus toFloat64).
|
||||
|
||||
This function will panic if the f returned by toPosFloat64(val) exceeds
math.MaxInt on your platform.
|
||||
*/
|
||||
func toPosInt(val any) (i int, NaN bool, err error) {
|
||||
|
||||
var f float64
|
||||
|
||||
if f, NaN, err = toPosFloat64(val); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
i = int(math.Round(f))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
toString uses reflection to resolve any string value (even custom types and ptrs)
|
||||
to a concrete string.
|
||||
|
||||
err will be ErrBadType if val is not a string type/string-derived type.
|
||||
err will be ErrNilVal if val is nil.
|
||||
*/
|
||||
func toString(val any) (s string, err error) {
|
||||
|
||||
var rv reflect.Value
|
||||
var k reflect.Kind
|
||||
|
||||
if val == nil {
|
||||
err = ErrNilVal
|
||||
return
|
||||
}
|
||||
|
||||
rv = reflect.ValueOf(val)
|
||||
k = rv.Kind()
|
||||
|
||||
if k == reflect.Ptr {
|
||||
if rv.IsNil() {
|
||||
// *technically* this should be handled above, but best be safe.
|
||||
err = ErrNilVal
|
||||
return
|
||||
}
|
||||
rv = rv.Elem()
|
||||
k = rv.Kind()
|
||||
}
|
||||
|
||||
if k == reflect.String {
|
||||
s = rv.String()
|
||||
} else {
|
||||
err = ErrBadType
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
33
tplx/sprigx/funcs_test.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package sprigx
|
||||
|
||||
import (
|
||||
`bytes`
|
||||
_ "embed"
|
||||
"testing"
|
||||
`text/template`
|
||||
|
||||
"github.com/Masterminds/sprig/v3"
|
||||
)
|
||||
|
||||
var (
|
||||
//go:embed "_test.tpl"
|
||||
testTplBytes []byte
|
||||
testTpl *template.Template = template.Must(
|
||||
template.
|
||||
New("").
|
||||
Funcs(sprig.TxtFuncMap()).
|
||||
Funcs(TxtFuncMap()).
|
||||
Parse(string(testTplBytes)),
|
||||
)
|
||||
)
|
||||
|
||||
func TestFuncs(t *testing.T) {
|
||||
|
||||
var err error
|
||||
var buf *bytes.Buffer = new(bytes.Buffer)
|
||||
|
||||
if err = testTpl.Execute(buf, nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log(buf.String())
|
||||
}
|
||||
9
tplx/sprigx/funcs_tpl_meta.go
Normal file
@@ -0,0 +1,9 @@
|
||||
package sprigx
|
||||
|
||||
// metaIsNil returns true if obj is explicitly nil.
|
||||
func metaIsNil(obj any) (isNil bool) {
|
||||
|
||||
isNil = obj == nil
|
||||
|
||||
return
|
||||
}
|
||||
82
tplx/sprigx/funcs_tpl_net.go
Normal file
@@ -0,0 +1,82 @@
|
||||
package sprigx
|
||||
|
||||
import (
|
||||
`math`
|
||||
`net`
|
||||
`strconv`
|
||||
)
|
||||
|
||||
// netExtractAddr calls net.ParseCIDR and returns the net.IP from it.
|
||||
func netExtractAddr(s string) (addr net.IP, err error) {
|
||||
|
||||
if addr, _, err = net.ParseCIDR(s); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// netExtractHost extracts the host component from hostPort.
|
||||
func netExtractHost(hostPort string) (host string, err error) {
|
||||
|
||||
if host, _, err = net.SplitHostPort(hostPort); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// netExtractIpnet calls net.ParseCIDR and returns the net.IPNet from it.
|
||||
func netExtractIpnet(s string) (ipnet *net.IPNet, err error) {
|
||||
|
||||
if _, ipnet, err = net.ParseCIDR(s); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// netExtractPort extracts the port component from hostPort.
|
||||
func netExtractPort(hostPort string) (port uint16, err error) {
|
||||
|
||||
var portStr string
|
||||
var u64 uint64
|
||||
|
||||
if _, portStr, err = net.SplitHostPort(hostPort); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if u64, err = strconv.ParseUint(portStr, 10, 16); err != nil {
|
||||
return
|
||||
}
|
||||
port = uint16(u64)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// netIp4Mask is a more flexible wrapper around net.IPv4Mask.
|
||||
func netIp4Mask(a, b, c, d any) (mask net.IPMask, err error) {
|
||||
|
||||
var idx int
|
||||
var elem any
|
||||
var elemInt int
|
||||
var mBytes [4]byte
|
||||
var orig [4]any = [4]any{a, b, c, d}
|
||||
|
||||
for idx, elem = range orig {
|
||||
if elemInt, _, err = toPosInt(elem); err != nil {
|
||||
return
|
||||
}
|
||||
if elemInt > math.MaxUint8 {
|
||||
err = ErrOverflow
|
||||
return
|
||||
}
|
||||
mBytes[idx] = byte(uint8(elemInt))
|
||||
}
|
||||
|
||||
mask = net.IPv4Mask(
|
||||
mBytes[0], mBytes[1], mBytes[2], mBytes[3],
|
||||
)
|
||||
|
||||
return
|
||||
}
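A brief in-package sketch (written as if it sat next to funcs_test.go; the hostname and values are purely illustrative) of what these helpers resolve to when called directly:

package sprigx

import (
    "testing"
)

// TestNetHelpersSketch exercises the host/port/CIDR extraction helpers directly.
func TestNetHelpersSketch(t *testing.T) {

    host, _ := netExtractHost("db01.example.net:5432") // "db01.example.net"
    port, _ := netExtractPort("db01.example.net:5432") // 5432
    ipnet, _ := netExtractIpnet("192.0.2.10/24")        // 192.0.2.0/24
    mask, err := netIp4Mask(255, 255, 255, "0")         // mixed argument types are accepted
    if err != nil {
        t.Fatal(err)
    }

    t.Logf("%s %d %s %s", host, port, ipnet, mask)
}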
|
||||
47
tplx/sprigx/funcs_tpl_netipx.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package sprigx
|
||||
|
||||
import (
|
||||
`net`
|
||||
`net/netip`
|
||||
|
||||
`go4.org/netipx`
|
||||
)
|
||||
|
||||
// netipxFromStdAddr wraps go4.org/netipx.FromStdAddr to comply with Go template requirements.
|
||||
func netipxFromStdAddr(ip net.IP, port int, zone string) (addrPort netip.AddrPort, err error) {
|
||||
|
||||
var ok bool
|
||||
|
||||
if addrPort, ok = netipx.FromStdAddr(ip, port, zone); !ok {
|
||||
err = ErrBadAddrPort
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// netipxFromIp wraps go4.org/netipx.FromStdIP to comply with Go template requirements.
|
||||
func netipxFromIp(ip net.IP) (addr netip.Addr, err error) {
|
||||
|
||||
var ok bool
|
||||
|
||||
if addr, ok = netipx.FromStdIP(ip); !ok {
|
||||
err = ErrBadAddr
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// netipxFromIpNet wraps go4.org/netipx.FromStdIPNet to comply with Go template requirements.
|
||||
func netipxFromIpNet(ipnet *net.IPNet) (pfx netip.Prefix, err error) {
|
||||
|
||||
var ok bool
|
||||
|
||||
if pfx, ok = netipx.FromStdIPNet(ipnet); !ok {
|
||||
err = ErrBadNet
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
51
tplx/sprigx/funcs_tpl_nums.go
Normal file
@@ -0,0 +1,51 @@
|
||||
package sprigx
|
||||
|
||||
import (
|
||||
`math/big`
|
||||
)
|
||||
|
||||
// numFloat64 converts any string representation of a numeric value, or any type of numeric value, to a float64.
|
||||
func numFloat64(val any) (f float64, err error) {
|
||||
|
||||
if f, _, err = toFloat64(val); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
numFloatStr wraps numFloat32Str and numFloat64Str.
|
||||
|
||||
val can be a string representation of any numeric value or any type of numeric value.
|
||||
*/
|
||||
func numFloatStr(val any) (s string, err error) {
|
||||
|
||||
var f float64
|
||||
|
||||
if f, _, err = toFloat64(val); err != nil {
|
||||
return
|
||||
}
|
||||
s = numFloat64Str(f)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// numFloat32Str returns float32 f as a complete string representation with no truncation (or right-padding).
|
||||
func numFloat32Str(f float32) (s string) {
|
||||
|
||||
s = numFloat64Str(float64(f))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// numFloat64Str returns float64 f as a complete string representation with no truncation (or right-padding).
|
||||
func numFloat64Str(f float64) (s string) {
|
||||
|
||||
var bf *big.Float
|
||||
|
||||
bf = big.NewFloat(f)
|
||||
s = bf.Text('f', -1)
|
||||
|
||||
return
|
||||
}
|
||||
99
tplx/sprigx/funcs_tpl_os.go
Normal file
@@ -0,0 +1,99 @@
|
||||
package sprigx
|
||||
|
||||
import (
|
||||
`os`
|
||||
`os/user`
|
||||
`strconv`
|
||||
`strings`
|
||||
)
|
||||
|
||||
/*
|
||||
osGroupById wraps os/user.LookupGroupId.
|
||||
|
||||
Can accept either a string (`"1000"`) or any
|
||||
numeric type (`1000`, `-1000`, `1000.0`, `MyCustomType(1000)`, etc.)
|
||||
*/
|
||||
func osGroupById(gid any) (g *user.Group, err error) {
|
||||
|
||||
var i int
|
||||
var NaN bool
|
||||
var gidStr string
|
||||
|
||||
if i, NaN, err = toPosInt(gid); err != nil {
|
||||
if NaN {
|
||||
err = nil
|
||||
if gidStr, err = toString(gid); err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
gidStr = strconv.Itoa(i)
|
||||
}
|
||||
|
||||
g, err = user.LookupGroupId(gidStr)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
osFQDN (tries to) return the FQDN of this host.
|
||||
|
||||
Currently it just calls os.Hostname() but may be extended to "try harder" in the future.
|
||||
*/
|
||||
func osFQDN() (fqdn string, err error) {
|
||||
|
||||
fqdn, err = os.Hostname()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
osHost returns the system's "host shortname".
|
||||
|
||||
Currently it just calls os.Hostname() and takes the first
|
||||
"host label" (as RFCs refer to it), but it may be extended
|
||||
in the future.
|
||||
*/
|
||||
func osHost() (hostNm string, err error) {
|
||||
|
||||
hostNm, err = os.Hostname()
|
||||
|
||||
if hostNm == "" {
|
||||
return
|
||||
}
|
||||
hostNm = strings.Split(hostNm, ".")[0]
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
osUserById wraps os/user.LookupId.
|
||||
|
||||
Can accept either a string (`"1000"`) or any
|
||||
numeric type (`1000`, `-1000`, `1000.0`, `MyCustomType(1000)`, etc.)
|
||||
*/
|
||||
func osUserById(uid any) (u *user.User, err error) {
|
||||
|
||||
var i int
|
||||
var NaN bool
|
||||
var uidStr string
|
||||
|
||||
if i, NaN, err = toPosInt(uid); err != nil {
|
||||
if NaN {
|
||||
err = nil
|
||||
if uidStr, err = toString(uid); err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
uidStr = strconv.Itoa(i)
|
||||
}
|
||||
|
||||
u, err = user.LookupId(uidStr)
|
||||
|
||||
return
|
||||
}
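A short in-package sketch of the flexible ID handling described above (UID/GID 0 is only an illustrative value; it assumes the platform actually has such entries):

package sprigx

import (
    "testing"
)

// TestOsLookupsSketch: numeric, string, and even float IDs all resolve the same way.
func TestOsLookupsSketch(t *testing.T) {

    uNum, err := osUserById(0) // numeric
    if err != nil {
        t.Skip(err) // e.g. the platform has no UID 0
    }

    uStr, err := osUserById("0") // string form of the same ID
    if err != nil {
        t.Fatal(err)
    }

    grp, err := osGroupById(0.0) // floats are rounded; negatives are made positive
    if err != nil {
        t.Fatal(err)
    }

    t.Log(uNum.Username, uStr.Username, grp.Name)
}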
|
||||
155
tplx/sprigx/funcs_tpl_paths.go
Normal file
@@ -0,0 +1,155 @@
|
||||
package sprigx
|
||||
|
||||
import (
|
||||
`os`
|
||||
`path`
|
||||
`path/filepath`
|
||||
)
|
||||
|
||||
/*
|
||||
//
|
||||
// GENERIC
|
||||
//
|
||||
*/
|
||||
|
||||
/*
|
||||
pathPipeJoin wraps path.Join with the root element at the *end* instead of the beginning.
|
||||
|
||||
{{ pathPipeJoin "b" "c" "a" }}
|
||||
|
||||
is equivalent to
|
||||
|
||||
path.Join("a", "b", "c")
|
||||
|
||||
This order variation is better suited for pipelines that pass the root path.
|
||||
*/
|
||||
func pathPipeJoin(elems ...string) (out string) {
|
||||
|
||||
var rootIdx int
|
||||
|
||||
if elems == nil || len(elems) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
rootIdx = len(elems) - 1
|
||||
out = elems[rootIdx]
|
||||
|
||||
if len(elems) == 1 {
|
||||
return
|
||||
}
|
||||
|
||||
out = pathSubJoin(out, elems[:rootIdx]...)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// pathSliceJoin joins a slice of path segments.
|
||||
func pathSliceJoin(sl []string) (out string) {
|
||||
|
||||
out = path.Join(sl...)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
pathSlicePipeJoin behaves like a mix of pathPipeJoin (in that it accepts the root element last)
|
||||
and pathSliceJoin (in that it accepts a slice of subpath segments).
|
||||
|
||||
It's essentially like pathSubJoin in reverse, and with an explicit slice.
|
||||
*/
|
||||
func pathSlicePipeJoin(sl []string, root string) (out string) {
|
||||
|
||||
out = pathSubJoin(root, sl...)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
pathSubJoin is like path.Join except it takes an explicit root
|
||||
and additional slice of subpaths to sequentially join to it.
|
||||
*/
|
||||
func pathSubJoin(root string, elems ...string) (out string) {
|
||||
|
||||
if elems == nil || len(elems) == 0 {
|
||||
out = root
|
||||
return
|
||||
}
|
||||
|
||||
out = path.Join(
|
||||
root,
|
||||
path.Join(
|
||||
elems...,
|
||||
),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
//
|
||||
// OS/PLATFORM
|
||||
//
|
||||
*/
|
||||
|
||||
/*
|
||||
osPathPipeJoin is like pathPipeJoin but uses the rendering OS' path separator (os.PathSeparator).
|
||||
*/
|
||||
func osPathPipeJoin(elems ...string) (out string) {
|
||||
|
||||
var rootIdx int
|
||||
|
||||
if elems == nil || len(elems) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
rootIdx = len(elems) - 1
|
||||
out = elems[rootIdx]
|
||||
|
||||
if len(elems) == 1 {
|
||||
return
|
||||
}
|
||||
|
||||
out = osPathSubJoin(out, elems[:rootIdx]...)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// osPathSep returns os.PathSeparator.
|
||||
func osPathSep() (out string) {
|
||||
|
||||
out = string(os.PathSeparator)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// osPathSliceJoin is the OS-specific implementation of pathSliceJoin.
|
||||
func osPathSliceJoin(sl []string) (out string) {
|
||||
out = filepath.Join(sl...)
|
||||
return
|
||||
}
|
||||
|
||||
// osPathSlicePipeJoin is the OS-specific implementation of pathSlicePipeJoin.
|
||||
func osPathSlicePipeJoin(sl []string, root string) (out string) {
|
||||
|
||||
out = osPathSubJoin(root, sl...)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// osPathSubJoin is the OS-specific implementation of pathSubJoin.
|
||||
func osPathSubJoin(root string, elems ...string) (out string) {
|
||||
|
||||
if elems == nil || len(elems) == 0 {
|
||||
out = root
|
||||
return
|
||||
}
|
||||
|
||||
out = filepath.Join(
|
||||
root,
|
||||
filepath.Join(
|
||||
elems...,
|
||||
),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
43
tplx/sprigx/funcs_tpl_psutils.go
Normal file
@@ -0,0 +1,43 @@
|
||||
package sprigx
|
||||
|
||||
import (
|
||||
`github.com/shirou/gopsutil/v4/host`
|
||||
)
|
||||
|
||||
/*
|
||||
psHostPlatInfo returns a "squashed" github.com/shirou/gopsutil/v4/host.PlatformInformation;
|
||||
normally it returns a (string, string, string, error)
|
||||
but you can only have an (any) or (any, error) return in Go templates.
|
||||
*/
|
||||
func psHostPlatInfo() (platInfo [3]string, err error) {
|
||||
|
||||
var s1 string
|
||||
var s2 string
|
||||
var s3 string
|
||||
|
||||
if s1, s2, s3, err = host.PlatformInformation(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
platInfo = [3]string{s1, s2, s3}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
psHostVirt returns a "squared" github.com/shirou/gopsutil/v4/host.Virtualization;
|
||||
normally it returns a (string, string, error) but Go templates etc.
|
||||
*/
|
||||
func psHostVirt() (virtInfo [2]string, err error) {
|
||||
|
||||
var s1 string
|
||||
var s2 string
|
||||
|
||||
if s1, s2, err = host.Virtualization(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
virtInfo = [2]string{s1, s2}
|
||||
|
||||
return
|
||||
}
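To make the "squashing" concrete, a small sketch in the style of funcs_test.go; inside a template, `index` (or `with`) unpacks the fixed-size arrays:

package sprigx

import (
    "bytes"
    "testing"
    "text/template"
)

// TestPsHostSquashedSketch renders the squashed platform/virtualization values.
func TestPsHostSquashedSketch(t *testing.T) {

    tpl := template.Must(template.New("ps").Funcs(TxtFuncMap()).Parse(
        `Platform: {{ index (psHostPlatInfo) 0 }} / Virt: {{ index (psHostVirt) 0 }}`,
    ))

    var buf bytes.Buffer
    if err := tpl.Execute(&buf, nil); err != nil {
        t.Fatal(err)
    }
    t.Log(buf.String())
}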
|
||||
15
tplx/sprigx/funcs_tpl_psutils_linux.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package sprigx
|
||||
|
||||
import (
|
||||
`context`
|
||||
|
||||
`github.com/shirou/gopsutil/v4/sensors`
|
||||
)
|
||||
|
||||
// psSensorExTemp wraps github.com/shirou/gopsutil/v4/sensors.NewExLinux().TemperatureWithContext() to not require a context.
|
||||
func psSensorExTemp() (exTemps []sensors.ExTemperature, err error) {
|
||||
|
||||
exTemps, err = sensors.NewExLinux().TemperatureWithContext(context.Background())
|
||||
|
||||
return
|
||||
}
|
||||
52
tplx/sprigx/funcs_tpl_strings.go
Normal file
@@ -0,0 +1,52 @@
|
||||
package sprigx
|
||||
|
||||
import (
|
||||
`strings`
|
||||
)
|
||||
|
||||
/*
|
||||
extIndent serves as a much more flexible alternative to the Sprig `indent`.
|
||||
|
||||
It has 6 arguments (the last of which may be passed in via pipeline):
|
||||
|
||||
* levels: The level of indentation for the text. If less than or equal to `0`, `extIndent` is a NO-OP and just returns `<input>` as-is.
|
||||
* skipFirst: If true, skip indenting the first line. This is particularly handy if you like to visually align your function calls in your templates.
|
||||
* skipEmpty: If true, do not add an indent to *empty* lines (where an "empty line" means "only has a linebreak").
|
||||
* skipWhitespace: If true, do not add an indent to lines that *only* consist of whitespace (spaces, tabs, etc.) and a linebreak.
|
||||
* indentString: The string to use as the "indent character". This can be any string, such as `" "`, `"\t"`, `"."`, `"|"`, `"=="` etc.
|
||||
(In fact, if indentString is set to "\n" and levels is always set to 1, this function can even be used to double-space text!)
|
||||
* input: The text to be indented. Because it is the last argument, `extIndent` works with pipelined text as well.
|
||||
|
||||
*/
|
||||
func extIndent(levels int, skipFirst, skipEmpty, skipWhitespace bool, indentString, input string) (out string) {
|
||||
|
||||
var idx int
|
||||
var pad string
|
||||
var line string
|
||||
var lines []string
|
||||
|
||||
if levels <= 0 {
|
||||
out = input
|
||||
return
|
||||
}
|
||||
|
||||
pad = strings.Repeat(indentString, levels)
|
||||
lines = strings.Split(input, "\n")
|
||||
|
||||
for idx, line = range lines {
|
||||
if idx == 0 && skipFirst {
|
||||
continue
|
||||
}
|
||||
if skipWhitespace && strings.TrimSpace(line) == "" && line != "" {
|
||||
continue
|
||||
}
|
||||
if skipEmpty && (line == "" || line == "\r") {
|
||||
continue
|
||||
}
|
||||
lines[idx] = pad + line
|
||||
}
|
||||
|
||||
out = strings.Join(lines, "\n")
|
||||
|
||||
return
|
||||
}
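Because the argument order matters (the input comes last so it can be piped in), a short sketch of calling extIndent from a template, in the style of funcs_test.go:

package sprigx

import (
    "bytes"
    "testing"
    "text/template"
)

// TestExtIndentSketch indents piped text two levels with "> ", skipping the
// first line and whitespace-only lines.
func TestExtIndentSketch(t *testing.T) {

    tpl := template.Must(template.New("ind").Funcs(TxtFuncMap()).Parse(
        `{{ "line one\nline two\n   \nline three" | extIndent 2 true false true "> " }}`,
    ))

    var buf bytes.Buffer
    if err := tpl.Execute(&buf, nil); err != nil {
        t.Fatal(err)
    }
    // "line one" stays flush left (skipFirst); the whitespace-only line is left
    // alone (skipWhitespace); the remaining lines gain a "> > " prefix.
    t.Log(buf.String())
}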
|
||||
39
tplx/sprigx/funcs_tpl_sys.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package sprigx
|
||||
|
||||
import (
|
||||
`fmt`
|
||||
`runtime`
|
||||
)
|
||||
|
||||
// sysArch returns [runtime.GOARCH].
|
||||
func sysArch() (out string) {
|
||||
|
||||
out = runtime.GOARCH
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// sysOsNm returns [runtime.GOOS].
|
||||
func sysOsNm() (out string) {
|
||||
|
||||
out = runtime.GOOS
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// sysRuntime returns various information from [runtime].
|
||||
func sysRuntime() (out map[string]string) {
|
||||
|
||||
out = map[string]string{
|
||||
"compiler": runtime.Compiler,
|
||||
"arch": runtime.GOARCH,
|
||||
"os": runtime.GOOS,
|
||||
"maxprocs": fmt.Sprintf("%d", runtime.GOMAXPROCS(-1)),
|
||||
"cpu_cnt": fmt.Sprintf("%d", runtime.NumCPU()),
|
||||
"num_cgo": fmt.Sprintf("%d", runtime.NumCgoCall()),
|
||||
"num_go": fmt.Sprintf("%d", runtime.NumGoroutine()),
|
||||
"go_ver": runtime.Version(),
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
139
tplx/sprigx/funcs_tpl_time.go
Normal file
@@ -0,0 +1,139 @@
|
||||
package sprigx
|
||||
|
||||
import (
|
||||
`errors`
|
||||
`strconv`
|
||||
`strings`
|
||||
`time`
|
||||
)
|
||||
|
||||
/*
|
||||
tmFmt formats time t using format string fstr.
|
||||
|
||||
While one certainly can do the same via e.g.
|
||||
|
||||
{{- $t := tmNow -}}
|
||||
{{ $t.Format $fstr }}
|
||||
|
||||
This takes a time.Time as the second (and last) parameter,
|
||||
allowing it to work in pipelines.
|
||||
*/
|
||||
func tmFmt(fstr string, t time.Time) (out string) {
|
||||
|
||||
out = t.Format(fstr)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
tmParseMonth attempts to first try tmParseMonthInt
|
||||
and then tries tmParseMonthStr if v is not "numeric".
|
||||
*/
|
||||
func tmParseMonth(v any) (mon time.Month, err error) {
|
||||
|
||||
var s string
|
||||
|
||||
if mon, err = tmParseMonthInt(v); err != nil {
|
||||
if errors.Is(err, strconv.ErrSyntax) {
|
||||
// NaN
|
||||
err = nil
|
||||
} else {
|
||||
return
|
||||
}
|
||||
} else {
// tmParseMonthInt succeeded; no need to fall back to string parsing.
return
}
|
||||
|
||||
// If it gets here, it's a non-numeric string.
|
||||
if s, err = toString(v); err != nil {
|
||||
return
|
||||
}
|
||||
if mon, err = tmParseMonthStr(s); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
tmParseMonthInt parses a number representation of month n to a time.Month.
|
||||
n may be any numeric type or a string representation of a number
|
||||
(or a custom type derived from those).
|
||||
|
||||
A negative integer (or float, etc.) will be converted to a positive one (e.g. -6 => 6 => time.June).
|
||||
|
||||
floats are rounded to the nearest integer.
|
||||
|
||||
The integer should map directly to the month constants in the time module:
|
||||
|
||||
* 1: January
|
||||
* 2: February
|
||||
* 3: March
|
||||
* 4: April
|
||||
* 5: May
|
||||
* 6: June
|
||||
* 7: July
|
||||
* 8: August
|
||||
* 9: September
|
||||
* 10: October
|
||||
* 11: November
|
||||
* 12: December
|
||||
|
||||
If n resolves to 0, mon will be the current month (as determined by time.Now).
|
||||
|
||||
If n resolves to > 12, err will be ErrBadMonth.
|
||||
*/
|
||||
func tmParseMonthInt(n any) (mon time.Month, err error) {
|
||||
|
||||
var i int
|
||||
|
||||
if i, _, err = toPosInt(n); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if i == 0 {
|
||||
mon = time.Now().Month()
|
||||
return
|
||||
}
|
||||
|
||||
if i > 12 {
|
||||
err = ErrBadMonth
|
||||
return
|
||||
}
|
||||
|
||||
mon = time.Month(i)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
tmParseMonthStr parses a string representation of month s to a time.Month.
|
||||
|
||||
It normalizes s to lowercase and only uses the first 3 characters
|
||||
(the minimum length needed to determine month name
|
||||
uniqueness - "June" vs. "July", "March" vs. "May").
|
||||
|
||||
An empty (or whitespace-only) string will use the current month (as determined by time.Now).
|
||||
*/
|
||||
func tmParseMonthStr(s string) (mon time.Month, err error) {
|
||||
|
||||
var i int
|
||||
var m time.Month
|
||||
|
||||
if strings.TrimSpace(s) == "" {
|
||||
mon = time.Now().Month()
|
||||
return
|
||||
}
|
||||
|
||||
s = strings.ToLower(strings.TrimSpace(s))[0:3]
|
||||
|
||||
for i = range 12 {
|
||||
m = time.Month(i + 1)
|
||||
if strings.ToLower(m.String())[0:3] == s {
|
||||
mon = m
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = ErrBadMonth
|
||||
|
||||
return
|
||||
}
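A short in-package sketch of the three parse paths (number, numeric string, and month name), which should all agree:

package sprigx

import (
    "testing"
    "time"
)

// TestParseMonthSketch: 9, "9", "Sep", and "september" all resolve to time.September.
func TestParseMonthSketch(t *testing.T) {

    for _, v := range []any{9, "9", "Sep", "september"} {
        mon, err := tmParseMonth(v)
        if err != nil {
            t.Fatal(err)
        }
        if mon != time.September {
            t.Fatalf("%v parsed to %s, not September", v, mon)
        }
    }
}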
|
||||
11
uuidx/consts.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package uuidx
|
||||
|
||||
const (
|
||||
RfcNone RfcGen = iota
|
||||
Rfc4122
|
||||
Rfc9562
|
||||
)
|
||||
|
||||
const (
|
||||
MsGuidThreshold int = 4
|
||||
)
|
||||
73
uuidx/doc.go
Normal file
@@ -0,0 +1,73 @@
|
||||
/*
|
||||
Package uuidx intends to supplement [github.com/google/uuid].
|
||||
|
||||
# Microsoft GUID Shenanigans
|
||||
|
||||
The following functions are provided to deal with [Microsoft's incompetence]:
|
||||
|
||||
* [DetectMsGuid] (a confidence'd determination if a UUID is a Microsoft GUID or not)
|
||||
* [IsFlippedEndian] for flipped-endian [uuid.UUID] comparison (e.g. a is the Microsoft-flipped-endian version of b)
|
||||
* [IsMsGuid] (wraps [DetectMsGuid] and returns true if confidence is reasonably strong that it's a Microsoft GUID)
|
||||
* [IsRfc] (the inverse of IsMsGuid, but also checks for strict RFC compliance and returns which RFC)
|
||||
* [MsGuidToUuid] (explicitly convert/ensure a GUID/UUID is likely a UUID)
|
||||
* [ToggleUuidMsGuid] (blindly flip the endianness of selected byte ranges for MS GUID <-> UUID conversion)
|
||||
* [UuidToMsGuid] (explicitly convert/ensure a GUID/UUID is likely an MS GUID)
|
||||
|
||||
Microsoft, in their typical insanity, uses a proprietary UUID format (usually referred to as the "Microsoft GUID Format"
|
||||
or "Mixed-Endian Format").
|
||||
|
||||
Normally, for example for a UUIDv4, it's structured thus per RFC 9562 [§ 5.4] (which obsoletes RFC 4122 [§ 4.4]):
|
||||
|
||||
A B C D E
|
||||
HEX(BE(uint32))-HEX(BE(uint16))-HEX(BE(uint16))-HEX(BE(<uint16>), BE(<6 bytes>))
|
||||
|
||||
(where BE() is big-endian packing).
|
||||
|
||||
However, thanks to Microsoft we can't have nice things. They decided to completely ignore the standard, and
|
||||
instead keep D/E as big-endian *but use little-endian* for A through C inclusive:
|
||||
|
||||
A B C D E
|
||||
HEX(LE(uint32))-HEX(LE(uint16))-HEX(LE(uint16))-HEX(BE(<uint16>), BE(<6 bytes>))
|
||||
|
||||
"Surely that had SOME reason to do that," you may say to yourself, "they wouldn't make some arbitrary formatting
|
||||
change from a standard just because."
|
||||
|
||||
You would be wrong. To my knowledge, they have never provided any technological justification for this insanity,
|
||||
and now it's infected its way into a slew of other technologies they've had their grubby little hands involved in
|
||||
(e.g. UEFI). And it's of course too late to change.
|
||||
|
||||
So anyways here's a library to make dealing with Microsoft's hubris a little easier.
|
||||
|
||||
# Validation/Verification
|
||||
|
||||
Aside from trying to address Microsoft silliness, there are some additional functions:
|
||||
|
||||
* [Equal] for [uuid.UUID] comparison
|
||||
* [IsMaxUUID] (if a given [uuid.UUID] is an RFC 9562 [§ 5.10] UUID)
|
||||
* [IsNilUUID] (if a given [uuid.UUID] is an RFC 9562 [§ 5.9] UUID)
|
||||
* [IsValid] (if a UUID can be considered safely conformant to the RFC spec)
|
||||
|
||||
# Future Incorporation/Deprecation/Obsolescence
|
||||
|
||||
Worth keeping an eye on are:
|
||||
|
||||
* https://github.com/google/uuid/pull/192
|
||||
* https://github.com/golang/go/issues/62026
|
||||
* https://github.com/golang/go/issues/76319
|
||||
(generally it's a bad idea for an API addition overall, but some good ideas were raised)
|
||||
|
||||
Some of these additions may deprecate/obsolete components of this package.
|
||||
I'll try to keep them around but mark as deprecated as they are (if they are),
|
||||
but I make no concrete promises - I hate making new major releases in Go's
|
||||
[silly module architecture] even more than I do keeping old deprecated code around.
|
||||
So caveat emptor.
|
||||
|
||||
[Microsoft's incompetence]: https://learn.microsoft.com/en-us/windows/win32/api/guiddef/ns-guiddef-guid
|
||||
[§ 5.4]: https://datatracker.ietf.org/doc/html/rfc9562#section-5.4
|
||||
[§ 4.4]: https://datatracker.ietf.org/doc/html/rfc4122#section-4.4
|
||||
[§ 5.9]: https://datatracker.ietf.org/doc/html/rfc9562#section-5.9
|
||||
[§ 5.10]: https://datatracker.ietf.org/doc/html/rfc9562#section-5.10
|
||||
[github:google/uuid#192]: https://github.com/google/uuid/pull/192
|
||||
[silly module architecture]: https://go.dev/doc/modules/major-version
|
||||
*/
|
||||
package uuidx
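To make the A/B/C byte-group reversal above concrete, here is a minimal standalone sketch; it illustrates the layout only and is not necessarily how the package's [ToggleUuidMsGuid] is implemented:

package main

import (
    "fmt"

    "github.com/google/uuid"
)

// flipMixedEndian reverses the byte order within fields A (bytes 0-3), B (4-5),
// and C (6-7); D and E (bytes 8-15) are big-endian in both formats.
func flipMixedEndian(u uuid.UUID) (out uuid.UUID) {
    out = u
    out[0], out[1], out[2], out[3] = u[3], u[2], u[1], u[0]
    out[4], out[5] = u[5], u[4]
    out[6], out[7] = u[7], u[6]
    return
}

func main() {
    u := uuid.MustParse("36ea5698-cac2-4723-af0c-3b42f76c9eca")
    fmt.Println(flipMixedEndian(u)) // 9856ea36-c2ca-2347-af0c-3b42f76c9eca
}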
|
||||
461
uuidx/funcs.go
Normal file
@@ -0,0 +1,461 @@
|
||||
package uuidx
|
||||
|
||||
import (
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
/*
|
||||
DetectMsGuid tries to guess if a given [uuid.UUID] is actually a Microsoft GUID or not.
|
||||
|
||||
Note that there are technically *two* types of Microsoft GUIDs:
|
||||
|
||||
* One is predictable, and defined in RFC 9562 [§ 4.2] as a known variant.
|
||||
Detecting this is very easy and (assuming an RFC-compliant UUID is originally passed) is detectable with 100% confidence.
|
||||
It's also legacy, and Microsoft no longer uses this format. Because they are insane and enjoy the suffering of others.
|
||||
* The other, MODERN Microsoft GUID currently in use is the endianness-flipped version (see [ToggleUuidMsGuid]).
|
||||
This is impossible to 100% determine, but analysis can get *pretty* close.
|
||||
|
||||
cs is a confidence score. As more logic is added, it *is* mathematically possible
|
||||
(though unlikely) that cs == 0, so the caller is then responsible for making further
|
||||
guesswork based on contextual analysis ("Did I get this UUID/GUID from an Active Directory attribute?"
|
||||
"Is it a SID constant?" etc.).
|
||||
|
||||
A score > 0 indicates a confidence leaning towards the provided UUID/GUID being a Microsoft GUID.
|
||||
A score < 0 indicates a confidence leaning towards the provided UUID/GUID *not* being a Microsoft GUID.
|
||||
Note that a score of < 0 does not necessarily indicate it is a *proper, standard RFC-compliant UUID*,
|
||||
simply that it is likely NOT a Microsoft GUID. [IsRfc] will be of further help in these cases.
|
||||
|
||||
csFlip indicates a score for the [ToggleUuidMsGuid]-flipped version of u.
|
||||
It follows the same rules for thresholds and such as cs, but may be awarded different confidence levels
|
||||
internally due to different chances of false positives.
|
||||
If both cs and csFlip are > 0 but csFlip > cs, it is better to assume that u is *not* in the flipped-endian format
|
||||
but *is* a Microsoft GUID (in other words, it is likely that u has *already been flipped* to proper/consistent endianness
|
||||
instead of being a mixed-endian GUID).
|
||||
|
||||
In some cases where flipped-endianness does not matter (e.g. [IsNilUUID], [IsMaxUUID]),
|
||||
cs and csFlip will be equal.
|
||||
|
||||
*Randomly-generated* GUIDs on Windows Server 2000-family and up are almost always UUIDv4.
|
||||
Pre-Windows Server 2000 family *OR* any *statically-defined* GUIDs (schemaIDGUID, rightsGUID, CLSID constants, etc.)
|
||||
are all over the place - TYPICALLY UUIDv1, but it's nothing predictable enough to be useful in definitive classification.
|
||||
COM interfaces are all OVER the place in UUID version, but usually *not* UUIDv4.
|
||||
|
||||
A target/expected UUID version can be provided via tgtVer. To disable version analysis, use 0 (or 0x00, etc.).
|
||||
It is *highly* recommended to provide a tgtVer if it is known; it can significantly boost confidence in the correct direction.
|
||||
A warning, though - if a *wrong* tgtVer IS specified, it can negatively affect confidence accuracy.
|
||||
Thus if you aren't ABSOLUTELY certain of the target UUID version, it's better to use 0/0x00 to disable the check.
|
||||
Providing a target version is key to breaking some ties (e.g. both cs and csFlip are equal).
|
||||
For example, the given RFC-compliant UUIDv4:
|
||||
|
||||
8d8e35ae-58d2-4d28-b09d-ffffffffffff
|
||||
|
||||
when flipped evaluates to an RFC-compliant UUIDv2:
|
||||
|
||||
ae358e8d-d258-284d-b09d-ffffffffffff
|
||||
|
||||
and in this case, cs and csFlip will both end up as 0.
|
||||
Providing a tgtVer of 4 shifts this to a proper "tie-breaker" of cs == -3 and csFlip == 0.
|
||||
Similarly, the endian-flipped UUIDv4 evaluates as a UUIDv2:
|
||||
|
||||
9856ea36-c2ca-2347-af0c-3b42f76c9eca
|
||||
|
||||
from the original unflipped UUIDv4:
|
||||
|
||||
36ea5698-cac2-4723-af0c-3b42f76c9eca
|
||||
|
||||
which results in a cs == 1 and csFlip == 0 - not very high confidence (but at least a correct and non-zero lean).
|
||||
Providing a tgtVer == 4 changes this to cs == 7 and csFlip == 0, which is *much* more decisive.
|
||||
|
||||
UUIDs/GUIDs found to be strictly RFC-conforming (via [IsRfc], which returns false for Microsoft GUIDs)
|
||||
are *heavily* weighted negatively.
|
||||
|
||||
Confidence levels can be generally considered as the following:
|
||||
|
||||
cs >= 7: Likely Microsoft GUID (mixed-endian)
|
||||
cs >= 4: Likely Microsoft GUID
|
||||
0 < cs < 4: Leans Microsoft GUID, but untrusted
|
||||
cs == 0: Entirely ambiguous/indeterminate
|
||||
-4 < cs < 0: Leans UUID/non-Microsoft GUID but untrusted
|
||||
cs <= -5: Likely UUID/not Microsoft GUID
|
||||
csFlip >= cs && csFlip >= 4: Likely a pre-flipped (ToggleUuidMsGuid'd) Microsoft GUID
|
||||
|
||||
[§ 4.2]: https://datatracker.ietf.org/doc/html/rfc9562#section-4.2
|
||||
*/
|
||||
|
||||
func DetectMsGuid(u uuid.UUID, tgtVer uuid.Version) (cs, csFlip int) {
|
||||
|
||||
var isRfc bool
|
||||
var flippedRfc bool
|
||||
var flipped uuid.UUID = ToggleUuidMsGuid(u)
|
||||
|
||||
// These are the exact same when flipped, and are statically defined.
|
||||
if IsNilUUID(u) || IsMaxUUID(u) {
|
||||
cs = -12
|
||||
csFlip = -12
|
||||
return
|
||||
}
|
||||
|
||||
// Most/all(?) Microsoft GUIDs are not NCS.
|
||||
if IsNcs(u) {
|
||||
cs -= 2
|
||||
}
|
||||
if IsNcs(flipped) {
|
||||
// The flipped has a higher likelihood of false-pos, so we don't score it as confidently.
|
||||
csFlip -= 1
|
||||
}
|
||||
|
||||
if u.Version() == 0 {
|
||||
if u.Variant() == uuid.Microsoft {
|
||||
cs += 10
|
||||
} else {
|
||||
cs -= 2
|
||||
}
|
||||
}
|
||||
if flipped.Version() == 0 {
|
||||
if flipped.Variant() == uuid.Microsoft {
|
||||
csFlip += 4
|
||||
} else {
|
||||
csFlip -= 1
|
||||
}
|
||||
}
|
||||
|
||||
// Valid RFC version and variant. IsRfc returns false for the Microsoft Variant and version == 0.
|
||||
// Modern MS uses an RFC 4122 variant indicator but flips the endianness.
|
||||
isRfc, _ = IsRfc(u)
|
||||
flippedRfc, _ = IsRfc(flipped)
|
||||
if u.Variant() == uuid.RFC4122 { // This might be the strongest indicator.
|
||||
if isRfc && !flippedRfc {
|
||||
// This is *very* strong of being an MS GUID.
|
||||
cs -= 8
|
||||
csFlip += 4
|
||||
} else if !isRfc && flippedRfc {
|
||||
// It probably is an MS GUID but was already flipped.
|
||||
csFlip += 6
|
||||
} else if isRfc && flippedRfc {
|
||||
/*
|
||||
If both are RFC-compat, it's a weird case where
|
||||
it actually IS RFC compliant and by chance the flipped is *also* RFC compat.
|
||||
An example of this is:
|
||||
8d8e35ae-58d2-4d28-b09d-ffffffffffff
|
||||
Which has the flipped version of:
|
||||
ae358e8d-d258-284d-b09d-ffffffffffff
|
||||
The original is a v4, the flipped evaluates as a v2!
|
||||
|
||||
Providing a target version breaks this away to a definitive score.
|
||||
*/
|
||||
}
|
||||
}
|
||||
|
||||
// *HEAVILY* weigh a provided version.
|
||||
if tgtVer != 0 {
|
||||
// NCS does some weird things to the versioning field. We return early on it though.
|
||||
// MS GUIDs have a pretty small chance of matching,
|
||||
// but their flipped counterpart SHOULD match versions.
|
||||
if flipped.Version() == tgtVer {
|
||||
cs += 7
|
||||
} else {
|
||||
cs -= 3
|
||||
}
|
||||
} else {
|
||||
// Give a *very small* boost to flippedRfc and flipped.Version() == 4, since it's so common.
|
||||
// Don't make this too high though since the version is explicitly specified as unknown.
|
||||
if flippedRfc && flipped.Version() == 4 {
|
||||
cs += 1
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
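A hedged usage sketch of interpreting the scores against the thresholds documented above (the import path is assumed from the module layout, and the GUID is the mixed-endian example from the comment, so with tgtVer 4 it should land in the mixed-endian branch):

package main

import (
    "fmt"

    "github.com/google/uuid"
    "r00t2.io/goutils/uuidx"
)

func main() {

    // The mixed-endian form of an RFC-compliant UUIDv4 (from the example above).
    g := uuid.MustParse("9856ea36-c2ca-2347-af0c-3b42f76c9eca")

    // We "know" the expected version here, which sharpens the scoring.
    cs, csFlip := uuidx.DetectMsGuid(g, 4)

    switch {
    case csFlip >= cs && csFlip >= uuidx.MsGuidThreshold:
        fmt.Println("likely an already-flipped Microsoft GUID")
    case cs >= uuidx.MsGuidThreshold:
        fmt.Println("likely a mixed-endian Microsoft GUID")
    case cs <= -5:
        fmt.Println("likely a plain RFC UUID, not a Microsoft GUID")
    default:
        fmt.Println("indeterminate; fall back to contextual analysis")
    }
}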
|
||||
|
||||
/*
|
||||
Equal returns `true` if the two provided [uuid.UUID] are the same.
|
||||
|
||||
Currently it just wraps:
|
||||
|
||||
eq = a == b
|
||||
|
||||
but is provided as a safety guarantee if the underlying structures/types should change.
|
||||
*/
|
||||
func Equal(a, b uuid.UUID) (eq bool) {
|
||||
|
||||
eq = a == b
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
IsFlippedEndian can be used to check if [uuid.UUID] a is a direct endian-flip ([ToggleUuidMsGuid])
of b (or vice versa, obviously).
|
||||
|
||||
It simply wraps:
|
||||
|
||||
isFlipped = Equal(a, ToggleUuidMsGuid(b))
|
||||
|
||||
but can be useful for shorthand/readability.
|
||||
*/
|
||||
func IsFlippedEndian(a, b uuid.UUID) (isFlipped bool) {
|
||||
|
||||
isFlipped = Equal(a, ToggleUuidMsGuid(b))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
IsMaxUUID returns `true` if the specified UUID is explicitly an RFC-defined
"Max UUID". (You may also see it specified in some places as the "Omni UUID".)

For details, see RFC 9562 [§ 5.10].

[§ 5.10]: https://datatracker.ietf.org/doc/html/rfc9562#section-5.10
*/
func IsMaxUUID(u uuid.UUID) (isMax bool) {

	isMax = u == uuid.Max

	return
}

/*
IsMsGuid wraps

	if cmp, _ = DetectMsGuid(msGUID, tgtVer); cmp < -3 {
		isMs = true
	}

Note that [uuid.Microsoft] is an actual RFC-defined variant, but *Microsoft no longer uses it*;
MODERN implementations instead do the endianness flip ([ToggleUuidMsGuid]) of (USUALLY) a UUIDv4.

See [DetectMsGuid] for a more in-depth result that lets you use the confidence level directly,
and for details on the weird things that can go wrong with this guesswork.

Note that this won't be 100% reliable due to the math involved, but it should be reliable enough most of the time.

See also [MsGuidToUuid] and [UuidToMsGuid].
*/
func IsMsGuid(msGUID uuid.UUID, tgtVer uuid.Version) (isMs bool) {

	var cmp int

	if cmp, _ = DetectMsGuid(msGUID, tgtVer); cmp < -3 {
		isMs = true
	}

	return
}

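// Hedged usage sketch for telling a GUID-style value apart from a plain UUID. The value below
// is the example UUID from the DetectMsGuid notes above; the booleans come from heuristic
// scoring, so treat the calls as illustrative rather than guaranteed results:
//
//	u := uuid.MustParse("8d8e35ae-58d2-4d28-b09d-ffffffffffff") // a plain RFC UUIDv4
//	g := ToggleUuidMsGuid(u)                                    // its endian-flipped, GUID-style form
//	_ = IsMsGuid(u, 4) // heuristic guess for the un-flipped form
//	_ = IsMsGuid(g, 4) // heuristic guess for the flipped (GUID-style) form
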
/*
IsNcs is shorthand for:

	isNcs = u.Variant() == uuid.Reserved

See also the notes in [IsRfc].
*/
func IsNcs(u uuid.UUID) (isNcs bool) {

	// https://archive.org/details/networkcomputing0000zahn/page/10/mode/1up

	isNcs = u.Variant() == uuid.Reserved

	return
}

/*
IsNilUUID returns `true` if the specified UUID is explicitly an RFC-defined
"Nil UUID".

For details, see RFC 9562 [§ 5.9].

[§ 5.9]: https://datatracker.ietf.org/doc/html/rfc9562#section-5.9
*/
func IsNilUUID(u uuid.UUID) (isNil bool) {

	isNil = u == uuid.Nil

	return
}

/*
IsRfc returns `true` if the specified UUID is a proper standard RFC UUID.

Because Microsoft is insane, rfc will be false even if it's a (legacy) Microsoft form
of an RFC UUID. Use [IsMsGuid] for that.

In the special case of u being a valid NCS UUID, rfc will be false but gen will be [Rfc4122].
This is because RFC 9562 deprecates the NCS UUID. See [IsNcs].
(You are highly unlikely to encounter an NCS UUID "in the wild" unless you are receiving
a UUID from someone who severely misunderstands that UUIDs are structured/versioned/typed
and thinks they're just random bytes in hex with hyphens in certain places.)
(They aren't that, if you're one of those someones.)

The Nil UUID ([IsNilUUID]) and Max UUID ([IsMaxUUID]) return true with RFC 4122 and RFC 9562 respectively.
*/
func IsRfc(u uuid.UUID) (rfc bool, gen RfcGen) {

	if IsNilUUID(u) {
		rfc = true
		gen = Rfc4122
		return
	}
	if IsMaxUUID(u) {
		rfc = true
		gen = Rfc9562
		return
	}
	if IsNcs(u) {
		gen = Rfc4122
		return
	}

	// TODO: Are there any sub-version checks that can be applied?
	switch u.Variant() {
	case uuid.Invalid, uuid.Microsoft, uuid.Future:
		return
	case uuid.RFC4122:
		if !(0x01 <= u.Version() && u.Version() <= 0x08) {
			return
		}
		rfc = true
		gen = Rfc4122
		// RFC 4122 only covers UUIDv1 through UUIDv5.
		if 0x06 <= u.Version() && u.Version() <= 0x08 {
			gen = Rfc9562
		}
	default: // Safety net in case upstream adds a uuid.RFC9562 variant or something.
		if !(0x01 <= u.Version() && u.Version() <= 0x08) {
			return
		}
		if u.Variant() < uuid.Future {
			return
		}
		rfc = true
		gen = RfcNone
		// RFC 4122 only covers UUIDv1 through UUIDv5.
		if 0x06 <= u.Version() && u.Version() <= 0x08 {
			gen = Rfc9562
		}
	}

	return
}

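// A minimal usage sketch, assuming the caller just wants to branch on the RFC generation
// ([uuid.New] is from github.com/google/uuid; fmt is only used for illustration):
//
//	u := uuid.New() // a random UUIDv4
//	if ok, gen := IsRfc(u); ok {
//		fmt.Printf("%s conforms to %s\n", u, gen.String())
//	}
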
/*
IsValid indicates if the given [uuid.UUID] strictly conforms to RFC.

A Nil UUID (as in RFC 9562 [§ 5.9], not a `nil` *uuid.UUID) will return `true`
as it IS technically defined per RFC despite not conforming to a version.
Use [IsNilUUID] to further determine that.

Likewise, a Max UUID (RFC 9562 [§ 5.10]) will return `true` as it is also
defined per RFC despite not conforming to a version.
Use [IsMaxUUID] to further determine that.

Microsoft GUIDs will always return false since they defy RFC.
Use [IsMsGuid] to check for that condition.

[§ 5.9]: https://datatracker.ietf.org/doc/html/rfc9562#section-5.9
[§ 5.10]: https://datatracker.ietf.org/doc/html/rfc9562#section-5.10
*/
func IsValid(u uuid.UUID) (valid bool) {

	if IsNilUUID(u) {
		valid = true
		return
	}
	if IsMaxUUID(u) {
		valid = true
		return
	}

	switch u.Variant() {
	case uuid.Invalid, uuid.Reserved, uuid.Microsoft, uuid.Future:
		return
	case uuid.RFC4122:
		valid = true
	// TODO: If they add an RFC9562 or something, need a case here.
	default:
		return
	}

	// If we got here, it *should* be RFC.
	if valid, _ = IsRfc(u); !valid {
		return
	}

	return
}

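// Hedged examples of the edge cases described above (uuid.Nil and uuid.Max come from
// github.com/google/uuid; the last line is probabilistic, not guaranteed):
//
//	IsValid(uuid.Nil)                     // true: the Nil UUID is RFC-defined
//	IsValid(uuid.Max)                     // true: the Max UUID is RFC-defined
//	IsValid(uuid.New())                   // true: a freshly generated UUIDv4
//	IsValid(ToggleUuidMsGuid(uuid.New())) // usually false: the flip typically lands an invalid version field
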
/*
MsGuidToUuid converts a Microsoft GUID to a UUID.

If [IsMsGuid] is false for msGUID, u will be equal to msGUID.

See [UuidToMsGuid] for the inverse, and [IsRfc] to check
if the result is a strictly conforming UUID.
*/
func MsGuidToUuid(msGUID uuid.UUID) (u uuid.UUID) {

	if !IsMsGuid(msGUID, 0x00) {
		u = msGUID
		return
	}
	u = ToggleUuidMsGuid(msGUID)

	return
}

/*
ToggleUuidMsGuid switches orig to its "other" format:

* if it's a Microsoft GUID, it will be converted to a UUID
* if it's a UUID, it will be converted to a Microsoft GUID

No detection ([IsRfc], [IsMsGuid], etc.) nor validation/verification ([IsValid]) is performed,
which is why this is a "toggle" - it just flips the endianness of certain byte ranges.

If you prefer something a little more explicit, see [MsGuidToUuid] and/or [UuidToMsGuid].
Alternatively, call [IsMsGuid] or [IsRfc] directly.
*/
func ToggleUuidMsGuid(orig uuid.UUID) (converted uuid.UUID) {

	var cb [16]byte
	var ob [16]byte = orig

	// We can just directly map the byte positions;
	// the operation is exactly the same regardless of whether the original is RFC and the target is MS or vice versa.
	cb = [16]byte{
		// THESE GET ENDIAN-SWAPPED
		ob[3], ob[2], ob[1], ob[0], // "A"
		ob[5], ob[4], // "B"
		ob[7], ob[6], // "C"
		// THESE STAY THE SAME (should be BE for both)
		ob[8], ob[9], ob[10], ob[11], // "D"
		ob[12], ob[13], ob[14], ob[15], // "E"
	}

	converted = uuid.UUID(cb)

	return
}

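// Worked example using the UUID pair from the DetectMsGuid notes above
// (groups "A", "B", and "C" are byte-reversed; "D" and "E" are untouched):
//
//	in:  8d8e35ae-58d2-4d28-b09d-ffffffffffff
//	out: ae358e8d-d258-284d-b09d-ffffffffffff
//
//	u := uuid.MustParse("8d8e35ae-58d2-4d28-b09d-ffffffffffff")
//	g := ToggleUuidMsGuid(u)          // ae358e8d-d258-284d-b09d-ffffffffffff
//	_ = Equal(u, ToggleUuidMsGuid(g)) // true: the toggle is its own inverse
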
/*
UuidToMsGuid converts a UUID to a Microsoft GUID.

If [DetectMsGuid] indicates a good likelihood of u already being a Microsoft GUID
(a score greater than or equal to [MsGuidThreshold]), msGUID will be equal to u.
(If it is detected as having unflipped endianness, it will automatically be flipped by this function.)

See [MsGuidToUuid] for the inverse.
*/
func UuidToMsGuid(u uuid.UUID) (msGUID uuid.UUID) {

	var msCmp int
	var flipped int

	if msCmp, flipped = DetectMsGuid(u, 0x00); msCmp >= MsGuidThreshold && msCmp > flipped {
		msGUID = u
		return
	}
	msGUID = ToggleUuidMsGuid(u)

	return
}

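// A short round-trip sketch tying the helpers together (illustrative; which direction
// actually fires depends on the DetectMsGuid heuristics):
//
//	u := uuid.New()           // an RFC UUIDv4
//	g := UuidToMsGuid(u)      // endian-flipped, GUID-style layout
//	back := MsGuidToUuid(g)   // should round-trip back to u in the typical case
//	_ = IsFlippedEndian(u, g) // true when the two differ only by the byte flip
//	_ = Equal(u, back)        // expected true, barring heuristic edge cases
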
22
uuidx/funcs_rfcgen.go
Normal file
22
uuidx/funcs_rfcgen.go
Normal file
@@ -0,0 +1,22 @@
package uuidx

// String conforms an [RfcGen] to a [fmt.Stringer] interface.
func (g *RfcGen) String() (s string) {

	if g == nil {
		s = "UNSPECIFIED_NIL"
		// Return early; dereferencing a nil receiver in the switch below would panic.
		return
	}

	switch *g {
	case RfcNone:
		s = "INVALID"
	case Rfc4122:
		s = "RFC 4122"
	case Rfc9562:
		s = "RFC 9562"
	default:
		s = "UNKNOWN"
	}

	return
}
5
uuidx/types.go
Normal file
5
uuidx/types.go
Normal file
@@ -0,0 +1,5 @@
package uuidx

type (
	RfcGen uint8
)