Compare commits

..

8 Commits

Author SHA1 Message Date
brent saner
c6efc2d83c
v1.12.0
FIXED:
* paths: Async searching works correctly now, and is consolidated to a
  single struct for searching options for async and synchronous
  searches.
2024-11-18 17:36:14 -05:00
brent saner
eefe02afaf
v1.11.0
ADDED:
* fsutils: better/additional fsattrs functionality
* paths: highly filterable filesystem searching
2024-11-16 01:28:24 -05:00
brent saner
b82f0c02ed
v1.10.1
FIX:
* fs.FileMode for object type is 0 for regular files, so an additional
  parameter is needed.
2024-11-12 06:50:44 -05:00
brent saner
903dd00c81
v1.10.0
ADDED:
* paths.SearchFsPaths, which lets a user provide a fairly flexible
  function for searching files/directories/etc.
2024-11-12 06:32:04 -05:00
brent saner
70a88ca8b4
v1.9.0
IMPROVED:
* Removed *BROKEN* dep. lrn2fixurshitk
2024-11-07 04:15:45 -05:00
brent saner
9dbc3a00fe
v1.8.2
Fix bad tag/version/go.mod
2024-10-29 12:19:54 -04:00
brent saner
e9b7c5539a
v1.8.1
ADDED:
* A way to actually use Auger externally. lel.
2024-10-29 12:17:05 -04:00
brent saner
236165bec8
v1.8.0
API CHANGES:
* Technically should be a v2.x, but cryptparse has been moved to its own
  module: r00t2.io/cryptparse
2024-10-17 15:34:26 -04:00
39 changed files with 1260 additions and 1755 deletions

3
.gitignore vendored
View File

@ -29,6 +29,9 @@
# Test binary, built with `go test -c`
*.test

# Test file
fsutils/testfile

# Output of the go coverage tool, specifically when used with LiteIDE
*.out


1
auger/TODO Normal file
View File

@ -0,0 +1 @@
This module is still under work.

View File

@ -7,3 +7,35 @@ const (
augInclTfm string = "incl" // The transformer keyword for Augeas includes.
augAppendSuffix string = "[last()+1]"
)

var (
dstPtrTrue bool = true
dstPtrFalse bool = false
)

var (
// PtrTrue and PtrFalse are convenience references for constructing an AugFlags if needed. It is recommended you do not change these values if you do not like being confused.
PtrTrue *bool = &dstPtrTrue
PtrFalse *bool = &dstPtrFalse
)

/*
IncludeOptNone is the default include recursion option for Aug.RecursiveInclude.
* No special behavior is defined
* All include directives are assumed to refer:
* Explicitly/exclusively to file paths
* That must exist
*/
const IncludeOptNone includeOpt = 0
const (
// IncludeOptNoExist specifies that inclusions are allowed to not exist, otherwise an error will be raised while attempting to parse them.
IncludeOptNoExist includeOpt = 1 << iota
// IncludeOptGlobbing indicates that the inclusion system supports globbing (as supported by (github.com/gobwas/glob).Match).
IncludeOptGlobbing
// IncludeOptRegex indicates that the inclusion system supports matching by regex (as supported by regexp).
IncludeOptRegex
// IncludeOptDirs indicates that the inclusion system supports matching by directory.
IncludeOptDirs
// IncludeOptDirsRecursive indicates that the inclusion system also recurses into subdirectories of matched directories. Only used if IncludeOptDirs is also set.
IncludeOptDirsRecursive
)
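
As a usage sketch (not part of this diff; the lens name and paths mirror the test file further below), these options are consumed by Aug.RecursiveInclude, either as separate variadic values or pre-OR'd into a single includeOpt:

package auger

// Illustrative sketch: both spellings are equivalent, since RecursiveInclude
// coalesces its variadic options into one internal bitmask.
func exampleIncludeOpts(aug *Aug) (err error) {

	// Separate variadic options...
	if err = aug.RecursiveInclude("Nginx", "include", "/etc/nginx",
		IncludeOptGlobbing, IncludeOptNoExist); err != nil {
		return
	}

	// ...or a single pre-OR'd includeOpt value.
	err = aug.RecursiveInclude("Nginx", "include", "/etc/nginx",
		IncludeOptGlobbing|IncludeOptNoExist)

	return
}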

View File

@ -4,15 +4,55 @@ import (
`io/fs`
`os`
`strings`

`honnef.co/go/augeas`
`r00t2.io/goutils/bitmask`
)

/*
AugpathToFspath returns the filesystem path from an Augeas path.
NewAuger returns an auger.Aug.

See:
https://pkg.go.dev/honnef.co/go/augeas#readme-examples
https://pkg.go.dev/honnef.co/go/augeas#New
for the `root` and `loadPath` parameters
(and, by extension, the `flags` parameter; note that `flags` here
is an auger.AugFlags, not an augeas.Flag!).

`flags` may be nil.
*/
func NewAuger(root, loadPath string, flags *AugFlags) (aug *Aug, err error) {

aug = new(Aug)

if aug.aug, err = augeas.New(root, loadPath, flags.Eval()); err != nil {
return
}

return
}
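
A minimal construction sketch (illustrative only, not part of the diff); the nil-flags case is handled by AugFlags.Eval, and DryRun with the PtrTrue helper is the combination exercised by this repo's tests:

package auger

// Illustrative sketch: the flags argument may be nil, or an AugFlags built
// with the PtrTrue/PtrFalse convenience pointers.
func exampleNewAuger() (err error) {

	var aug *Aug

	// Library-default flags (Eval tolerates a nil receiver).
	if aug, err = NewAuger("/", "", nil); err != nil {
		return
	}
	aug.Close()

	// Explicit flags via the pointer helpers.
	if aug, err = NewAuger("/", "", &AugFlags{DryRun: PtrTrue}); err != nil {
		return
	}
	aug.Close()

	return
}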

// NewAugerFromAugeas returns a wrapped auger.Aug from a (honnef.co/go/augeas).Augeas.
func NewAugerFromAugeas(orig augeas.Augeas) (aug *Aug) {

aug = new(Aug)
aug.aug = orig

return
}

/*
AugpathToFspath returns the filesystem path (i.e. an existing file) from an Augeas path.

It is *required* and expected that the Augeas standard /files prefix be removed first;
if not, it is assumed to be part of the filesystem path.

If a valid path cannot be determined, fsPath will be empty.

To be clear, a file must exist for fsPath to be non-empty;
AugpathToFspath walks the given path bottom-up, checking each
level for an existing file and continuing upwards until one
is found.
*/
func AugpathToFspath(augPath string) (fsPath string, err error) {

@ -61,3 +101,11 @@ func dedupePaths(new, existing []string) (missing []string) {

return
}

// getInclPaths applies path options to inclusions.
func getInclPaths(pathSpec string, inclFlags *bitmask.MaskBit) (fpaths []string, err error) {

// TODO

return
}
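
Stepping back to AugpathToFspath (documented above in this file), a worked example of the bottom-up resolution, using a hypothetical Augeas node path whose /files prefix has already been stripped:

package auger

// Illustrative sketch: neither ".../nginx.conf/http/server" nor
// ".../nginx.conf/http" exists as a file, so the walk stops at
// "/etc/nginx/nginx.conf" (assuming that file exists on the host)
// and returns it as fsPath.
func exampleAugpathToFspath() (fsPath string, err error) {

	fsPath, err = AugpathToFspath("/etc/nginx/nginx.conf/http/server")

	return
}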

View File

@ -12,6 +12,7 @@ import (
`github.com/davecgh/go-spew/spew`
`github.com/google/shlex`
`honnef.co/go/augeas`
`r00t2.io/goutils/bitmask`
`r00t2.io/sysutils/paths`
)

@ -146,10 +147,21 @@ breakCmd:
An error will be returned if augLens is a nonexistent or not-loaded Augeas lens module.

Depending on how many files there are and whether globs vs. explicit filepaths are included, this may take a while.
*/
func (a *Aug) RecursiveInclude(augLens, includeDirective, fsRoot string) (err error) {

if err = a.addIncl(includeDirective, augLens, fsRoot, nil); err != nil {
optFlags may be nil, one or more includeOpt values (see the IncludeOpt* constants) passed as variadic parameters/an expanded slice,
values bitwise-OR'd together, or any mix of OR'd and non-OR'd values (all will be combined into a single value).
*/
func (a *Aug) RecursiveInclude(augLens, includeDirective, fsRoot string, optFlags ...includeOpt) (err error) {

var flags *bitmask.MaskBit = bitmask.NewMaskBit()

if optFlags != nil && len(optFlags) > 0 {
for _, f := range optFlags {
flags.AddFlag(f.toMb())
}
}

if err = a.addIncl(includeDirective, augLens, fsRoot, nil, flags); err != nil {
return
}

@ -164,14 +176,16 @@ func (a *Aug) RecursiveInclude(augLens, includeDirective, fsRoot string) (err er
newInclPaths are new filesystem paths/Augeas-compatible glob patterns to load into the filetree and recurse into.
They may be nil, especially if the first run.
*/
func (a *Aug) addIncl(includeDirective, augLens string, fsRoot string, newInclPaths []string) (err error) {
func (a *Aug) addIncl(includeDirective, augLens string, fsRoot string, newInclPaths []string, inclFlags *bitmask.MaskBit) (err error) {

var matches []string // Passed around set of Augeas matches.
var exists bool // Used to indicate if the include path exists.
var includes []string // Filepath(s)/glob(s) from fetching includeDirective in lensInclPath. These are internal to the application but are recursed.
var lensInclPath string // The path of the included paths in the tree. These are internal to Augeas, not the application.
var appendPath string // The path for new Augeas includes.
var match []string // A placeholder for iterating when populating includes.
var fpath string // A placeholder for finding the path of a conf file that contains an includeDirective.
var normalizedIncludes []string // A temporary slice to hold normalization operations and other dynamic building.
var lensPath string = fmt.Sprintf(augLensTpl, augLens) // The path of the lens (augLens) itself.
var augErr *augeas.Error = new(augeas.Error) // We use this to skip "nonexistent" lens.

@ -193,7 +207,7 @@ func (a *Aug) addIncl(includeDirective, augLens string, fsRoot string, newInclPa

// First canonize paths.
if newInclPaths != nil && len(newInclPaths) > 0 {
// Existing includes. We don't return on an empty lensInclPath because
// Existing includes. We don't return on an empty lensInclPath.
if matches, err = a.aug.Match(lensInclPath); err != nil {
if errors.As(err, augErr) && augErr.Code == augeas.NoMatch {
err = nil
@ -221,6 +235,17 @@ func (a *Aug) addIncl(includeDirective, augLens string, fsRoot string, newInclPa
// We don't want to bother adding multiple incl's for the same path(s); it can negatively affect Augeas loads.
newInclPaths = dedupePaths(newInclPaths, matches)

// And then apply things like recursion, globbing, etc.
normalizedIncludes = make([]string, 0, len(newInclPaths))
if inclFlags.HasFlag(IncludeOptGlobbing.toMb()) {
// TODO
/*
if strings.Contains(newInclPaths[idx], "*") {

}
*/
}

// Add the new path(s) as Augeas include entries.
if newInclPaths != nil {
for _, fsPath := range newInclPaths {
@ -285,10 +310,13 @@ func (a *Aug) addIncl(includeDirective, augLens string, fsRoot string, newInclPa
}

if matches != nil && len(matches) != 0 {
if err = a.addIncl(includeDirective, augLens, fsRoot, matches); err != nil {
if err = a.addIncl(includeDirective, augLens, fsRoot, matches, inclFlags); err != nil {
return
}
}

// TODO
_, _ = exists, normalizedIncludes

return
}

View File

@ -7,6 +7,10 @@ import (
// Eval returns an evaluated set of flags.
func (a *AugFlags) Eval() (augFlags augeas.Flag) {

if a == nil {
return
}

augFlags = augeas.None

if a.Backup != nil && *a.Backup {

13
auger/funcs_includeopt.go Normal file
View File

@ -0,0 +1,13 @@
package auger

import (
`r00t2.io/goutils/bitmask`
)

// toMb returns a bitmask.MaskBit of this includeOpt.
func (i includeOpt) toMb() (mb bitmask.MaskBit) {

mb = bitmask.MaskBit(i)

return
}

39
auger/funcs_test.go Normal file
View File

@ -0,0 +1,39 @@
package auger

import (
"testing"

`honnef.co/go/augeas`
)

func TestNewAuger(t *testing.T) {

var aug *Aug
var augUnder augeas.Augeas
var err error

if aug, err = NewAuger("/", "", nil); err != nil {
t.Fatal(err)
}

augUnder = aug.aug

aug = NewAugerFromAugeas(augUnder)

_ = aug
}

func TestRecursiveInclude(t *testing.T) {

var aug *Aug
var err error

if aug, err = NewAuger("/", "", &AugFlags{DryRun: PtrTrue}); err != nil {
t.Fatal(err)
}

// This requires Nginx to be installed, with a particularly complex nested include system.
if err = aug.RecursiveInclude("Nginx", "include", "/etc/nginx"); err != nil {
t.Fatal(err)
}
}

View File

@ -2,8 +2,11 @@ package auger

import (
`honnef.co/go/augeas`
`r00t2.io/goutils/bitmask`
)

type includeOpt bitmask.MaskBit

// Aug is a wrapper around (honnef.co/go/)augeas.Augeas. Remember to call Aug.Close().
type Aug struct {
aug augeas.Augeas

View File

@ -1,3 +0,0 @@
- PKCS#12/PFX parsing/support

- Move to struct tags and reflection, so it can not only be easier to maintain in the future but also be implemented in custom structs downstream.

View File

@ -1,134 +0,0 @@
package cryptparse

import (
`crypto/tls`

`github.com/go-playground/validator/v10`
)

var (
tlsVerNmToUint map[string]uint16
tlsCipherNmToUint map[string]uint16
tlsCurveNmToCurve map[string]tls.CurveID
)

const (
MaxTlsCipher uint16 = tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
MaxCurveId tls.CurveID = tls.X25519 // 29
MinTlsVer uint16 = tls.VersionSSL30
MaxTlsVer uint16 = tls.VersionTLS13
DefaultNetType string = "tcp"
)

// TlsUriParam* specify URL query parameters used when parsing a tls:// URI, and are used by TlsUri methods.
const (
/*
TlsUriParamCa specifies a path to a CA certificate PEM-encoded DER file.

It may be specified multiple times in a TLS URI.
*/
TlsUriParamCa string = "pki_ca"
/*
TlsUriParamCert specifies a path to a client certificate PEM-encoded DER file.

It may be specified multiple times in a TLS URI.
*/
TlsUriParamCert string = "pki_cert"
/*
TlsUriParamKey specifies a path to a private key as a PEM-encoded file.

It may be PKCS#1, PKCS#8, or PEM-encoded ASN.1 DER EC key.

Supported private key types are RSA, ED25519, ECDSA, and ECDH.

It may be specified multiple times in a TLS URI.
*/
TlsUriParamKey string = "pki_key"
/*
TlsUriParamNoVerify, if set to `1`, `yes`, `y`, or `true`, indicates
that the TLS connection should not require verification of
the remote end (e.g. hostname matches, trusted chain, etc.).

Any other value for this parameter will be parsed as "False"
(meaning the remote end's certificate SHOULD be verified).

Only the first defined instance is parsed.
*/
TlsUriParamNoVerify string = "no_verify"
/*
TlsUriParamSni indicates that the TLS connection should expect this hostname
instead of the hostname specified in the URI itself.

Only the first defined instance is parsed.
*/
TlsUriParamSni string = "sni"
/*
TlsUriParamCipher specifies one (or more) cipher(s)
to specify for the TLS connection cipher negotiation.
Note that TLS 1.3 has a fixed set of ciphers, and
this list may not be respected by the remote end.

The string may either be the name (as per
https://www.iana.org/assignments/tls-parameters/tls-parameters.xml)
or an int (normal, hex, etc. string representation).

It may be specified multiple times in a TLS URI.
*/
TlsUriParamCipher string = "cipher"
/*
TlsUriParamCurve specifies one (or more) curve(s)
to specify for the TLS connection cipher negotiation.

It may be specified multiple times in a TLS URI.
*/
TlsUriParamCurve string = "curve"
/*
TlsUriParamMinTls defines the minimum version of the
TLS protocol to use.
It is recommended to use "TLS_1.3".

Supported syntax formats include:

* TLS_1.3
* 1.3
* v1.3
* TLSv1.3
* 0x0304 (legacy_version, see RFC8446 § 4.1.2)
* 772 (0x0304 in int form)
* 0o1404 (0x0304 in octal form)

All evaluate to TLS 1.3 in this example.

Only the first defined instance is parsed.
*/
TlsUriParamMinTls string = "min_tls"
/*
TlsUriParamMaxTls defines the maximum version of the
TLS protocol to use.

See TlsUriParamMinTls for syntax of the value.

Only the first defined instance is parsed.
*/
TlsUriParamMaxTls string = "max_tls"
/*
TlsUriParamNet is used by TlsUri.ToConn and TlsUri.ToTlsConn to explicitly specify a network.

The default is "tcp".

See net.Dial()'s "network" parameter for valid network types.

Only the first defined instance is parsed.
*/
TlsUriParamNet string = "net"
)
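
Although this file is removed in this diff (cryptparse now lives in its own module, r00t2.io/cryptparse), a short sketch of how these query parameters compose may still help; the host and file paths below are hypothetical, and FromURL/ToTlsConfig are helpers defined later in this (removed) package:

package cryptparse

import (
	"crypto/tls"
	"net/url"
)

// Illustrative sketch: a TLS URI with a custom CA, a client cert/key pair,
// an SNI override, and a TLS 1.3 floor, turned into a *tls.Config.
func exampleTlsUriParams() (cfg *tls.Config, err error) {

	var u *url.URL

	if u, err = url.Parse("tls://db.example.com:5432/" +
		"?pki_ca=/etc/ssl/my-ca.pem" +
		"&pki_cert=/etc/ssl/client.pem" +
		"&pki_key=/etc/ssl/client.key" +
		"&sni=db.internal.example.com" +
		"&min_tls=TLS_1.3"); err != nil {
		return
	}

	cfg, err = FromURL(u).ToTlsConfig()

	return
}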

var (
paramBoolValsTrue []string = []string{
"1", "yes", "y", "true",
}
paramBoolValsFalse []string = []string{
"0", "no", "n", "false",
}
validate *validator.Validate = validator.New(validator.WithRequiredStructEnabled())
)

6
cryptparse/doc.go Normal file
View File

@ -0,0 +1,6 @@
/*
CRYPTPARSE HAS MOVED.

It is now its own module: r00t2.io/cryptparse
*/
package cryptparse

View File

@ -1,13 +0,0 @@
package cryptparse

import (
`errors`
)

var (
ErrBadTlsCipher error = errors.New("invalid TLS cipher suite")
ErrBadTlsCurve error = errors.New("invalid TLS curve")
ErrBadTlsVer error = errors.New("invalid TLS version")
ErrUnknownCipher error = errors.New("unknown TLS cipher")
ErrUnknownKey error = errors.New("unknown key type")
)

View File

@ -1,826 +0,0 @@
package cryptparse

import (
`bytes`
`crypto`
`crypto/ecdh`
`crypto/ecdsa`
`crypto/ed25519`
`crypto/rsa`
`crypto/tls`
`crypto/x509`
`encoding/pem`
`errors`
`net/url`
`os`
`strconv`
`strings`

`r00t2.io/sysutils/paths`
)

// FromURL returns a *TlsUri from a *url.URL.
func FromURL(u *url.URL) (t *TlsUri) {

var newU *url.URL

if u == nil {
return
}

newU = new(url.URL)
*newU = *u
if u.User != nil {
newU.User = new(url.Userinfo)
*newU.User = *u.User
}

newU.Scheme = "tls"

t = &TlsUri{
URL: newU,
}

return
}

// IsMatchedPair returns true if the privateKey is paired with the cert.
func IsMatchedPair(privKey crypto.PrivateKey, cert *x509.Certificate) (isMatched bool, err error) {

var pubkey crypto.PublicKey

if cert == nil || privKey == nil {
return
}

pubkey = cert.PublicKey

switch k := privKey.(type) {
case *rsa.PrivateKey:
if p, ok := pubkey.(*rsa.PublicKey); ok {
isMatched = k.PublicKey.Equal(p)
return
}
case ed25519.PrivateKey:
if p, ok := pubkey.(ed25519.PublicKey); ok {
// Order is flipped here because unlike the other key types, an ed25519.PrivateKey is just a []byte.
isMatched = p.Equal(k.Public())
return
}
case *ecdh.PrivateKey:
if p, ok := pubkey.(*ecdh.PublicKey); ok {
isMatched = k.PublicKey().Equal(p)
return
}
case *ecdsa.PrivateKey:
if p, ok := pubkey.(*ecdsa.PublicKey); ok {
isMatched = k.PublicKey.Equal(p)
return
}
}

// If we got here, we can't determine either the private key type or the cert's public key type.
err = ErrUnknownKey

return
}

/*
ParseTlsCipher parses string s and attempts to derive a TLS cipher suite (as a uint16) from it.
Use ParseTlsCipherSuite if you wish for a tls.CipherSuite instead.

The string may either be the name (as per https://www.iana.org/assignments/tls-parameters/tls-parameters.xml)
or an int (normal, hex, etc. string representation).

If none is found, the default is MaxTlsCipher.
*/
func ParseTlsCipher(s string) (cipherSuite uint16, err error) {

var nm string
var n uint64
var i uint16
var ok bool

if n, err = strconv.ParseUint(s, 10, 16); err != nil {
if errors.Is(err, strconv.ErrSyntax) {
// It's a name; parse below.
err = nil
} else {
return
}
} else {
// It's a number.
if nm = tls.CipherSuiteName(uint16(n)); strings.HasPrefix(nm, "0x") {
// ...but invalid.
err = ErrBadTlsCipher
return
} else {
// Valid (as number). Return it.
cipherSuite = uint16(n)
return
}
}

s = strings.ToUpper(s)
s = strings.ReplaceAll(s, " ", "_")

// We build a dynamic map of cipher suite names to uint16s (if not already created).
if tlsCipherNmToUint == nil {
tlsCipherNmToUint = make(map[string]uint16)
for i = 0; i <= MaxTlsCipher; i++ {
if nm = tls.CipherSuiteName(i); !strings.HasPrefix(nm, "0x") {
tlsCipherNmToUint[nm] = i
}
}
}

cipherSuite = MaxTlsCipher
if i, ok = tlsCipherNmToUint[s]; ok {
cipherSuite = i
}

return
}
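
A brief sketch of ParseTlsCipher's two accepted input forms (again, illustrative only; the function now lives in r00t2.io/cryptparse):

package cryptparse

// Illustrative sketch: the IANA-style name and its numeric identifier resolve
// to the same suite (0xCCA9 == 52393); names are case- and space-insensitive,
// as exercised by the test file further below.
func exampleParseTlsCipher() (err error) {

	var byName, byNumber uint16

	if byName, err = ParseTlsCipher("TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256"); err != nil {
		return
	}

	if byNumber, err = ParseTlsCipher("52393"); err != nil {
		return
	}

	_, _ = byName, byNumber // both are 0xCCA9

	return
}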

// ParseTlsCipherStrict is like ParseTlsCipher, but an ErrBadTlsCipher or ErrUnknownCipher error will be raised if no matching cipher is found.
func ParseTlsCipherStrict(s string) (cipherSuite uint16, err error) {

var nm string
var n uint64
var i uint16
var ok bool

if n, err = strconv.ParseUint(s, 10, 16); err != nil {
if errors.Is(err, strconv.ErrSyntax) {
// It's a name; parse below.
err = nil
} else {
return
}
} else {
// It's a number.
if nm = tls.CipherSuiteName(uint16(n)); strings.HasPrefix(nm, "0x") {
// ...but invalid.
err = ErrBadTlsCipher
return
} else {
// Valid (as number). Return it.
cipherSuite = uint16(n)
return
}
}

s = strings.ToUpper(s)
s = strings.ReplaceAll(s, " ", "_")

// We build a dynamic map of cipher suite names to uint16s (if not already created).
if tlsCipherNmToUint == nil {
tlsCipherNmToUint = make(map[string]uint16)
for i = 0; i <= MaxTlsCipher; i++ {
if nm = tls.CipherSuiteName(i); !strings.HasPrefix(nm, "0x") {
tlsCipherNmToUint[nm] = i
}
}
}

if i, ok = tlsCipherNmToUint[s]; ok {
cipherSuite = i
} else {
err = ErrUnknownCipher
}

return
}

/*
ParseTlsCiphers parses s as a comma-separated list of cipher suite names/integers and returns a slice of suites.

See ParseTlsCipher for details, as this is mostly just a wrapper around it.

If no cipher suites are found, cipherSuites will only contain MaxTlsCipher.
*/
func ParseTlsCiphers(s string) (cipherSuites []uint16) {

var suiteNms []string
var cipher uint16
var err error

suiteNms = strings.Split(s, ",")
cipherSuites = make([]uint16, 0, len(suiteNms))

for _, nm := range suiteNms {
if cipher, err = ParseTlsCipher(nm); err != nil {
err = nil
continue
}
cipherSuites = append(cipherSuites, cipher)
}

if len(cipherSuites) == 0 {
cipherSuites = []uint16{tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256}
}

return
}

// ParseTlsCipherSuite is like ParseTlsCipher but returns a *tls.CipherSuite instead of a uint16 TLS cipher identifier.
func ParseTlsCipherSuite(s string) (cipherSuite *tls.CipherSuite, err error) {

var cipherId uint16

if cipherId, err = ParseTlsCipher(s); err != nil {
return
}

for _, v := range tls.CipherSuites() {
if v.ID == cipherId {
cipherSuite = v
return
}
}
for _, v := range tls.InsecureCipherSuites() {
if v.ID == cipherId {
cipherSuite = v
return
}
}

return
}

// ParseTlsCipherSuiteStrict is like ParseTlsCipherSuite, but an ErrBadTlsCipher or ErrUnknownCipher error will be raised if no matching cipher is found.
func ParseTlsCipherSuiteStrict(s string) (cipherSuite *tls.CipherSuite, err error) {

var cipherId uint16

if cipherId, err = ParseTlsCipherStrict(s); err != nil {
return
}

for _, v := range tls.CipherSuites() {
if v.ID == cipherId {
cipherSuite = v
return
}
}
for _, v := range tls.InsecureCipherSuites() {
if v.ID == cipherId {
cipherSuite = v
return
}
}

return
}

// ParseTlsCipherSuites is like ParseTlsCiphers but returns a []*tls.CipherSuite instead of a []uint16 of TLS cipher identifiers.
func ParseTlsCipherSuites(s string) (cipherSuites []*tls.CipherSuite, err error) {

var found bool
var cipherIds []uint16

cipherIds = ParseTlsCiphers(s)

for _, cipherId := range cipherIds {
found = false
for _, v := range tls.CipherSuites() {
if v.ID == cipherId {
cipherSuites = append(cipherSuites, v)
found = true
break
}
}
if !found {
for _, v := range tls.InsecureCipherSuites() {
if v.ID == cipherId {
cipherSuites = append(cipherSuites, v)
break
}
}
}
}

return
}

/*
ParseTlsCurve parses string s and attempts to derive a tls.CurveID from it.

The string may either be the name (as per https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-8)
or an int (normal, hex, etc. string representation).
*/
func ParseTlsCurve(s string) (curve tls.CurveID, err error) {

var i tls.CurveID
var n uint64
var ok bool

if n, err = strconv.ParseUint(s, 10, 16); err != nil {
if errors.Is(err, strconv.ErrSyntax) {
// It's a name; parse below.
err = nil
} else {
return
}
} else {
// It's a number.
if strings.HasPrefix(tls.CurveID(uint16(n)).String(), "CurveID(") {
// ...but invalid.
err = ErrBadTlsCurve
return
} else {
// Valid (as number). Return it.
curve = tls.CurveID(uint16(n))
return
}
}

// It seems to be a name. Normalize...
s = strings.ToUpper(s)

// Unfortunately there's no "tls.CurveIDName()" function.
// They do have a .String() method though.
if tlsCurveNmToCurve == nil {
tlsCurveNmToCurve = make(map[string]tls.CurveID)
for i = 0; i <= MaxCurveId; i++ {
if strings.HasPrefix(i.String(), "CurveID(") {
continue
}
tlsCurveNmToCurve[i.String()] = i
// It's normally mixed-case; we want to be able to look it up in a normalized all-caps as well.
tlsCurveNmToCurve[strings.ToUpper(i.String())] = i
// The normal name, except for X25519, has "Curve" in the front. We add it without that prefix as well.
tlsCurveNmToCurve[strings.TrimPrefix(i.String(), "Curve")] = i
}
}

curve = MaxCurveId
if _, ok = tlsCurveNmToCurve[s]; ok {
curve = tlsCurveNmToCurve[s]
}

return
}

/*
ParseTlsCurves parses s as a comma-separated list of tls.CurveID names/integers and returns a slice of tls.CurveID.

See ParseTlsCurve for details, as this is mostly just a wrapper around it.

If no curves are found, curves will only contain MaxCurveId.
*/
func ParseTlsCurves(s string) (curves []tls.CurveID) {

var curveNms []string
var curve tls.CurveID
var err error

curveNms = strings.Split(s, ",")
curves = make([]tls.CurveID, 0, len(curveNms))

for _, nm := range curveNms {
if curve, err = ParseTlsCurve(nm); err != nil {
err = nil
continue
}
curves = append(curves, curve)
}

if len(curves) == 0 {
curves = []tls.CurveID{MaxCurveId}
}

return
}

/*
ParseTlsUri parses a "TLS URI"'s query parameters. All certs and keys must be in PEM format.

You probably don't need this and should instead be using TlsUri.ToTlsConfig.
It just wraps this, but is probably more convenient.
*/
func ParseTlsUri(tlsUri *url.URL) (tlsConf *tls.Config, err error) {

var b []byte
var rootCAs *x509.CertPool
var intermediateCAs []*x509.Certificate
var privKeys []crypto.PrivateKey
var tlsCerts []tls.Certificate
var allowInvalid bool
var ciphers []uint16
var curves []tls.CurveID
var params map[string][]string
var ok bool
var val string
var minVer uint16
var maxVer uint16
var buf *bytes.Buffer = new(bytes.Buffer)
var srvNm string = tlsUri.Hostname()

params = tlsUri.Query()

if params == nil {
tlsConf = &tls.Config{
ServerName: srvNm,
}
return
}

// These are all filepath(s).
for _, k := range []string{
TlsUriParamCa,
TlsUriParamCert,
TlsUriParamKey,
} {
if _, ok = params[k]; ok {
for idx, _ := range params[k] {
if err = paths.RealPath(&params[k][idx]); err != nil {
return
}
}
}
}

// CA cert(s).
buf.Reset()
if _, ok = params[TlsUriParamCa]; ok {
rootCAs = x509.NewCertPool()
for _, c := range params[TlsUriParamCa] {
if b, err = os.ReadFile(c); err != nil {
if errors.Is(err, os.ErrNotExist) {
err = nil
continue
}
}
buf.Write(b)
}
if rootCAs, _, intermediateCAs, err = ParseCA(buf.Bytes()); err != nil {
return
}
} else {
if rootCAs, err = x509.SystemCertPool(); err != nil {
return
}
}

// Keys. These are done first so we can match to a client certificate.
buf.Reset()
if _, ok = params[TlsUriParamKey]; ok {
for _, k := range params[TlsUriParamKey] {
if b, err = os.ReadFile(k); err != nil {
if errors.Is(err, os.ErrNotExist) {
err = nil
continue
} else {
return
}
}
buf.Write(b)
}
if privKeys, err = ParsePrivateKey(buf.Bytes()); err != nil {
return
}
}

// (Client) Certificate(s).
buf.Reset()
if _, ok = params[TlsUriParamCert]; ok {
for _, c := range params[TlsUriParamCert] {
if b, err = os.ReadFile(c); err != nil {
if errors.Is(err, os.ErrNotExist) {
err = nil
continue
} else {
return
}
}
buf.Write(b)
}
if tlsCerts, err = ParseLeafCert(buf.Bytes(), privKeys, intermediateCAs...); err != nil {
return
}
}

// Hostname (Override).
if _, ok = params[TlsUriParamSni]; ok {
srvNm = params[TlsUriParamSni][0]
}

// Disable Verification.
if _, ok = params[TlsUriParamNoVerify]; ok {
val = strings.ToLower(params[TlsUriParamNoVerify][0])
for _, i := range paramBoolValsTrue {
if i == val {
allowInvalid = true
break
}
}
}

// Ciphers.
if _, ok = params[TlsUriParamCipher]; ok {
ciphers = ParseTlsCiphers(strings.Join(params[TlsUriParamCipher], ","))
}

// Minimum TLS Protocol Version.
if _, ok = params[TlsUriParamMinTls]; ok {
if minVer, err = ParseTlsVersion(params[TlsUriParamMinTls][0]); err != nil {
return
}
}

// Maximum TLS Protocol Version.
if _, ok = params[TlsUriParamMaxTls]; ok {
if maxVer, err = ParseTlsVersion(params[TlsUriParamMaxTls][0]); err != nil {
return
}
}

// Curves.
if _, ok = params[TlsUriParamCurve]; ok {
curves = ParseTlsCurves(strings.Join(params[TlsUriParamCurve], ","))
}

tlsConf = &tls.Config{
Certificates: tlsCerts,
RootCAs: rootCAs,
ServerName: srvNm,
InsecureSkipVerify: allowInvalid,
CipherSuites: ciphers,
MinVersion: minVer,
MaxVersion: maxVer,
CurvePreferences: curves,
}

return
}

// ParseTlsVersion parses string s and attempts to derive a TLS version from it. If none is found, tlsVer will be 0.
func ParseTlsVersion(s string) (tlsVer uint16, err error) {

var nm string
var n uint64
var i uint16
var ok bool

if n, err = strconv.ParseUint(s, 10, 16); err != nil {
if errors.Is(err, strconv.ErrSyntax) {
// It's a name; parse below.
err = nil
} else {
return
}
} else {
// It's a number.
if nm = tls.VersionName(uint16(n)); strings.HasPrefix(nm, "0x") {
// ...but invalid.
err = ErrBadTlsVer
return
} else {
// Valid (as number). Return it.
tlsVer = uint16(n)
return
}
}

// If we get here, it should be parsed as a version string.
s = strings.ToUpper(s)
s = strings.ReplaceAll(s, "_", " ")
s = strings.ReplaceAll(s, "V", " ")
s = strings.TrimSpace(s)
if !strings.HasPrefix(s, "SSL") && !strings.HasPrefix(s, "TLS ") {
s = "TLS " + s
}

// We build a dynamic map of version names to uint16s (if not already created).
if tlsVerNmToUint == nil {
tlsVerNmToUint = make(map[string]uint16)
for i = MinTlsVer; i <= MaxTlsVer; i++ {
if nm = tls.VersionName(i); !strings.HasPrefix(nm, "0x") {
tlsVerNmToUint[nm] = i
}
}
}

if i, ok = tlsVerNmToUint[s]; ok {
tlsVer = i
}

return
}
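
For completeness, a sketch covering a few of the version spellings described in the TlsUriParamMinTls documentation above (the hex/octal literal forms are omitted here):

package cryptparse

// Illustrative sketch: several spellings of the same protocol version.
func exampleParseTlsVersion() (err error) {

	var v uint16

	for _, s := range []string{"TLS_1.3", "1.3", "v1.3", "TLSv1.3", "772"} {
		if v, err = ParseTlsVersion(s); err != nil {
			return
		}
		_ = v // 0x0304 (tls.VersionTLS13) in every case
	}

	return
}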

/*
ParseCA parses PEM bytes and returns an *x509.CertPool in caCerts.

Concatenated PEM files are supported.

Any keys found will be filtered out, as will any leaf certificates.

Any *intermediate* CAs (the certificate is a CA but it is not self-signed) will be returned separate from
certPool.

Ordering from the file is preserved in the returned slices.
*/
func ParseCA(certRaw []byte) (certPool *x509.CertPool, rootCerts []*x509.Certificate, intermediateCerts []*x509.Certificate, err error) {

var pemBlocks []*pem.Block
var cert *x509.Certificate
var certs []*x509.Certificate

if pemBlocks, err = SplitPem(certRaw); err != nil {
return
}

// Filter out keys etc. and non-CA certs.
for _, b := range pemBlocks {
if b.Type != "CERTIFICATE" {
continue
}
if cert, err = x509.ParseCertificate(b.Bytes); err != nil {
return
}
if !cert.IsCA {
continue
}
certs = append(certs, cert)
}

for _, cert = range certs {
if bytes.Equal(cert.RawIssuer, cert.RawSubject) {
// It's a root/self-signed.
rootCerts = append(rootCerts, cert)
} else {
// It's an intermediate.
intermediateCerts = append(intermediateCerts, cert)
}
}

if rootCerts != nil {
certPool = x509.NewCertPool()
for _, cert = range rootCerts {
certPool.AddCert(cert)
}
}

return
}
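
A small sketch of feeding a concatenated PEM bundle through ParseCA (hypothetical path; the function now lives in r00t2.io/cryptparse):

package cryptparse

import (
	"crypto/x509"
	"os"
)

// Illustrative sketch: split a mixed PEM bundle into a pool of self-signed
// roots plus the loose intermediates; keys and leaf certificates are filtered out.
func exampleParseCA() (pool *x509.CertPool, intermediates []*x509.Certificate, err error) {

	var b []byte

	if b, err = os.ReadFile("/etc/ssl/my-ca-bundle.pem"); err != nil {
		return
	}

	pool, _, intermediates, err = ParseCA(b)

	return
}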

/*
ParseLeafCert parses PEM bytes from a (client) certificate file, iterates over a slice of
crypto.PrivateKey (finding one that matches), and returns one (or more) tls.Certificate.

The key may also be combined with the certificate in the same file.

If no private key matches or no client cert is found in the file, tlsCerts will be nil (or missing
that certificate), but no error will be returned.
This behavior can be avoided by passing a nil slice to keys.

Any leaf certificates ("server" certificate, as opposed to a signer/issuer) found in the file
will be assumed to be the desired one(s).

Any additional/supplementary intermediates may be provided. Any present in the PEM bytes (certRaw) will be included.

Any *root* CAs found will be discarded. They should/can be extracted separately via ParseCA.

The parsed and paired certificates and keys can be found in each respective tls.Certificate.Leaf and tls.Certificate.PrivateKey.
Any certs without a corresponding key will be discarded.
*/
func ParseLeafCert(certRaw []byte, keys []crypto.PrivateKey, intermediates ...*x509.Certificate) (tlsCerts []tls.Certificate, err error) {

var pemBlocks []*pem.Block
var cert *x509.Certificate
var certs []*x509.Certificate
var caCerts []*x509.Certificate
var parsedKeys []crypto.PrivateKey
var isMatched bool
var foundKey crypto.PrivateKey
var interBytes [][]byte
var skipKeyPair bool = keys == nil
var parsedKeysBuf *bytes.Buffer = new(bytes.Buffer)

if pemBlocks, err = SplitPem(certRaw); err != nil {
return
}

for _, b := range pemBlocks {
if strings.Contains(b.Type, "PRIVATE KEY") {
parsedKeysBuf.Write(pem.EncodeToMemory(b))
continue
}
if b.Type != "CERTIFICATE" {
continue
}
if cert, err = x509.ParseCertificate(b.Bytes); err != nil {
return
}
if cert.IsCA {
if bytes.Equal(cert.RawIssuer, cert.RawSubject) {
caCerts = append(caCerts, cert)
} else {
intermediates = append(intermediates, cert)
}
}
certs = append(certs, cert)
}

if intermediates != nil && len(intermediates) != 0 {
interBytes = make([][]byte, len(intermediates))
for _, i := range intermediates {
interBytes = append(interBytes, i.Raw)
}
}

if parsedKeysBuf.Len() != 0 {
if parsedKeys, err = ParsePrivateKey(parsedKeysBuf.Bytes()); err != nil {
return
}
keys = append(keys, parsedKeys...)
}

// Now pair the certs and keys, and combine as a tls.Certificate.
for _, cert = range certs {
foundKey = nil
for _, k := range keys {
if isMatched, err = IsMatchedPair(k, cert); err != nil {
return
}
if isMatched {
foundKey = k
break
}
}
if foundKey == nil && !skipKeyPair {
continue
}
tlsCerts = append(
tlsCerts,
tls.Certificate{
Certificate: append([][]byte{cert.Raw}, interBytes...),
PrivateKey: foundKey,
Leaf: cert,
},
)
}

_ = caCerts

return
}

/*
ParsePrivateKey parses PEM bytes to a private key. Multiple keys may be concatenated in the same file.

Any public keys, certificates, etc. found will be discarded.
*/
func ParsePrivateKey(keyRaw []byte) (keys []crypto.PrivateKey, err error) {

var privKey crypto.PrivateKey
var pemBlocks []*pem.Block

if pemBlocks, err = SplitPem(keyRaw); err != nil {
return
}

for _, b := range pemBlocks {
if !strings.Contains(b.Type, "PRIVATE KEY") {
continue
}
switch b.Type {
case "RSA PRIVATE KEY": // PKCS#1
if privKey, err = x509.ParsePKCS1PrivateKey(b.Bytes); err != nil {
return
}
keys = append(keys, privKey)
case "EC PRIVATE KEY": // SEC 1, ASN.1 DER
if privKey, err = x509.ParseECPrivateKey(b.Bytes); err != nil {
return
}
keys = append(keys, privKey)
case "PRIVATE KEY": // PKCS#8
if privKey, err = x509.ParsePKCS8PrivateKey(b.Bytes); err != nil {
return
}
keys = append(keys, privKey)
default:
err = ErrUnknownKey
return
}
}

// TODO

return
}

// SplitPem splits a single block of bytes into one (or more) (encoding/)pem.Blocks.
func SplitPem(pemRaw []byte) (blocks []*pem.Block, err error) {

var block *pem.Block
var rest []byte

for block, rest = pem.Decode(pemRaw); block != nil; block, rest = pem.Decode(rest) {
blocks = append(blocks, block)
}

return
}

View File

@ -1,37 +0,0 @@
package cryptparse

import (
`crypto/tls`
"testing"
)

func TestCiphers(t *testing.T) {

var err error
var cs *tls.CipherSuite

// Good ciphers
for _, cn := range []string{
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
"tls ecdhe ecdsa with chacha20 poly1305 sha256",
} {
if cs, err = ParseTlsCipherSuiteStrict(cn); err != nil {
t.Fatalf("ERROR parsing good cipher '%s': %v", cn, err)
}
if cs.Name != cn {
t.Logf("Cipher name change: '%s' => '%s'", cn, cs.Name)
}
t.Logf("Cipher for '%s':\n%#v", cn, cs)
}

// Bad ciphers
for _, cn := range []string{
"TLS_BAD_CIPHER",
} {
if cs, err = ParseTlsCipherSuiteStrict(cn); err == nil {
t.Fatalf("ERROR parsing bad cipher '%s'; err is nil", cn)
}
}

_ = cs
}

View File

@ -1,217 +0,0 @@
package cryptparse

import (
`bytes`
`crypto`
`crypto/tls`
`crypto/x509`
`errors`
`fmt`
`net/url`
`os`
`strings`

`r00t2.io/sysutils/paths`
)

// Normalize ensures that all specified filepaths are absolute, etc.
func (t *TlsFlat) Normalize() (err error) {

if t.Certs != nil {
for _, c := range t.Certs {
if err = paths.RealPath(&c.CertFile); err != nil {
return
}
if c.KeyFile != nil {
if err = paths.RealPath(c.KeyFile); err != nil {
return
}
}
}
}
if t.CaFiles != nil {
for idx, _ := range t.CaFiles {
if err = paths.RealPath(&t.CaFiles[idx]); err != nil {
return
}
}
}

return
}

/*
ToTlsConfig returns a tls.Config from a TlsFlat. Note that it will have Normalize called on it.

Unfortunately it's not possible for this library to do the reverse, as CA certificates are not able to be extracted from an x509.CertPool.
*/
func (t *TlsFlat) ToTlsConfig() (tlsConf *tls.Config, err error) {

var b []byte
var rootCAs *x509.CertPool
var intermediateCAs []*x509.Certificate
var privKeys []crypto.PrivateKey
var tlsCerts []tls.Certificate
var parsedTlsCerts []tls.Certificate
var ciphers []uint16
var curves []tls.CurveID
var minVer uint16
var maxVer uint16
var buf *bytes.Buffer = new(bytes.Buffer)
var srvNm string = t.SniName

// Normalize any filepaths before validation.
if err = t.Normalize(); err != nil {
return
}

// And validate.
if err = validate.Struct(t); err != nil {
return
}

// CA cert(s).
buf.Reset()
if t.CaFiles != nil {
rootCAs = x509.NewCertPool()
for _, c := range t.CaFiles {
if b, err = os.ReadFile(c); err != nil {
if errors.Is(err, os.ErrNotExist) {
err = nil
continue
}
}
buf.Write(b)
}
if rootCAs, _, intermediateCAs, err = ParseCA(buf.Bytes()); err != nil {
return
}
} else {
if rootCAs, err = x509.SystemCertPool(); err != nil {
return
}
}

// Keys and Certs. They are assumed to be matched.
if t.Certs != nil {
for _, c := range t.Certs {
privKeys = nil
if c.KeyFile != nil {
if b, err = os.ReadFile(*c.KeyFile); err != nil {
return
}
if privKeys, err = ParsePrivateKey(b); err != nil {
return
}
}
if b, err = os.ReadFile(c.CertFile); err != nil {
return
}
if parsedTlsCerts, err = ParseLeafCert(b, privKeys, intermediateCAs...); err != nil {
return
}
tlsCerts = append(tlsCerts, parsedTlsCerts...)
}
}

// Ciphers.
if t.CipherSuites != nil {
ciphers = ParseTlsCiphers(strings.Join(t.CipherSuites, ","))
}

// Minimum TLS Protocol Version.
if t.MinTlsProtocol != nil {
if minVer, err = ParseTlsVersion(*t.MinTlsProtocol); err != nil {
return
}
}

// Maximum TLS Protocol Version.
if t.MaxTlsProtocol != nil {
if maxVer, err = ParseTlsVersion(*t.MaxTlsProtocol); err != nil {
return
}
}

// Curves.
if t.Curves != nil {
curves = ParseTlsCurves(strings.Join(t.Curves, ","))
}

tlsConf = &tls.Config{
Certificates: tlsCerts,
RootCAs: rootCAs,
ServerName: srvNm,
InsecureSkipVerify: t.SkipVerify,
CipherSuites: ciphers,
MinVersion: minVer,
MaxVersion: maxVer,
CurvePreferences: curves,
}
return
}

// ToTlsUri returns a TlsUri from a TlsFlat.
func (t *TlsFlat) ToTlsUri() (tlsUri *TlsUri, err error) {

var u *url.URL

if u, err = url.Parse(fmt.Sprintf("tls://%v/", t.SniName)); err != nil {
return
}

// CA cert(s).
if t.CaFiles != nil {
for _, c := range t.CaFiles {
u.Query().Add(TlsUriParamCa, c)
}
}

// Keys and Certs.
if t.Certs != nil {
for _, c := range t.Certs {
u.Query().Add(TlsUriParamCert, c.CertFile)
if c.KeyFile != nil {
u.Query().Add(TlsUriParamKey, *c.KeyFile)
}
}
}

// Enforce the SNI hostname.
u.Query().Add(TlsUriParamSni, t.SniName)

// Disable Verification.
if t.SkipVerify {
u.Query().Add(TlsUriParamNoVerify, "1")
}

// Ciphers.
if t.CipherSuites != nil {
for _, c := range t.CipherSuites {
u.Query().Add(TlsUriParamCipher, c)
}
}

// Minimum TLS Protocol Version.
if t.MinTlsProtocol != nil {
u.Query().Add(TlsUriParamMinTls, *t.MinTlsProtocol)
}

// Maximum TLS Protocol Version.
if t.MaxTlsProtocol != nil {
u.Query().Add(TlsUriParamMaxTls, *t.MaxTlsProtocol)
}

// Curves.
if t.Curves != nil {
for _, c := range t.Curves {
u.Query().Add(TlsUriParamCurve, c)
}
}

tlsUri = &TlsUri{
URL: u,
}

return
}

View File

@ -1,256 +0,0 @@
package cryptparse

import (
`crypto`
`crypto/tls`
`net`
`net/url`
`os`
`strings`
)

/*
WithConn returns a (crypto/)tls.Conn from an existing/already dialed net.Conn.

underlying should be a "bare" net.Conn; behavior is undefined/unknown if the underlying conn is already a (crypto/)tls.Conn.
*/
func (t *TlsUri) WithConn(underlying net.Conn) (conn *tls.Conn, err error) {

var cfg *tls.Config

if cfg, err = t.ToTlsConfig(); err != nil {
return
}

conn = tls.Client(underlying, cfg)

return
}

/*
ToConn returns a "bare" net.Conn (already dialed) from a TlsUri.

Note that this does NOT include the TLS configured or initialized; use TlsUri.ToTlsConn for that.
(A (crypto/)tls.Conn conforms to net.Conn.)

An error will be returned if no port is explicitly defined in the TlsUri.
*/
func (t *TlsUri) ToConn() (conn net.Conn, err error) {

var ok bool
var connHost string
var params map[string][]string
var netType string = DefaultNetType

params = t.Query()

if params != nil {
if _, ok = params[TlsUriParamNet]; ok {
netType = params[TlsUriParamNet][0]
}
}
netType = strings.ToLower(netType)

switch netType {
case "unix", "unixgram", "unixpacket":
connHost = t.Path
default:
connHost = t.Host
}

if conn, err = net.Dial(netType, connHost); err != nil {
return
}

return
}

/*
ToTlsConfig returns a *tls.Config from a TlsUri.

Unfortunately it's not possible for this library to do the reverse, as CA certificates are not able to be extracted from an x509.CertPool.
*/
func (t *TlsUri) ToTlsConfig() (cfg *tls.Config, err error) {

if cfg, err = ParseTlsUri(t.URL); err != nil {
return
}

return
}

/*
ToTlsConn returns a (crypto/)tls.Conn (already dialed) from a TlsUri.

An error will be returned if no port is explicitly defined in the TlsUri.
*/
func (t *TlsUri) ToTlsConn() (conn *tls.Conn, err error) {

var ok bool
var cfg *tls.Config
var connHost string
var params map[string][]string
var netType string = DefaultNetType

if cfg, err = t.ToTlsConfig(); err != nil {
return
}

params = t.Query()

if params != nil {
if _, ok = params[TlsUriParamNet]; ok {
netType = params[TlsUriParamNet][0]
}
}
netType = strings.ToLower(netType)

switch netType {
case "unix", "unixgram", "unixpacket":
connHost = t.Path
default:
connHost = t.Host
}

if conn, err = tls.Dial(netType, connHost, cfg); err != nil {
return
}

return
}

// ToTlsFlat returns a *TlsFlat from a TlsUri.
func (t *TlsUri) ToTlsFlat() (tlsFlat *TlsFlat, err error) {

var b []byte
var params url.Values
var paramMap map[string][]string
// These also have maps so they can backmap filenames.
var privKeys []crypto.PrivateKey
var privKeyMap map[string][]crypto.PrivateKey
var tlsCerts []tls.Certificate
var tlsCertMap map[string][]tls.Certificate
var isMatch bool
var fCert *TlsFlatCert
var val string
var f TlsFlat = TlsFlat{
SniName: t.Hostname(),
SkipVerify: false,
Certs: nil,
CaFiles: nil,
CipherSuites: nil,
MinTlsProtocol: nil,
MaxTlsProtocol: nil,
Curves: nil,
}

params = t.Query()
paramMap = params

if params == nil {
tlsFlat = &f
return
}

// CA cert(s).
if t.Query().Has(TlsUriParamCa) {
f.CaFiles = append(f.CaFiles, paramMap[TlsUriParamCa]...)
}

// Keys and Certs. These are done first so we can match to a client certificate.
if t.Query().Has(TlsUriParamKey) {
privKeyMap = make(map[string][]crypto.PrivateKey)
for _, kFile := range paramMap[TlsUriParamKey] {
if b, err = os.ReadFile(kFile); err != nil {
return
}
if privKeyMap[kFile], err = ParsePrivateKey(b); err != nil {
return
}
privKeys = append(privKeys, privKeyMap[kFile]...)
}
}
if t.Query().Has(TlsUriParamCert) {
tlsCertMap = make(map[string][]tls.Certificate)
for _, cFile := range paramMap[TlsUriParamCert] {
if b, err = os.ReadFile(cFile); err != nil {
return
}
if tlsCertMap[cFile], err = ParseLeafCert(b, privKeys); err != nil {
return
}
tlsCerts = append(tlsCerts, tlsCertMap[cFile]...)
}
}
// We then correlate. Whew, lads.
for cFile, c := range tlsCertMap {
for _, cert := range c {
for kFile, k := range privKeyMap {
if isMatch, err = IsMatchedPair(k, cert.Leaf); err != nil {
return
} else if isMatch {
fCert = &TlsFlatCert{
CertFile: cFile,
KeyFile: new(string),
}
*fCert.KeyFile = kFile
f.Certs = append(f.Certs, fCert)
}
}
}
}

// Hostname.
if t.Query().Has(TlsUriParamSni) {
f.SniName = t.Query().Get(TlsUriParamSni)
}

// Disable verification.
if t.Query().Has(TlsUriParamNoVerify) {
val = strings.ToLower(t.Query().Get(TlsUriParamNoVerify))
for _, i := range paramBoolValsTrue {
if val == i {
f.SkipVerify = true
break
}
}
}

// Ciphers.
if t.Query().Has(TlsUriParamCipher) {
f.CipherSuites = params[TlsUriParamCipher]
}

// Minimum TLS Protocol Version.
if t.Query().Has(TlsUriParamMinTls) {
f.MinTlsProtocol = new(string)
*f.MinTlsProtocol = t.Query().Get(TlsUriParamMinTls)
}

// Maximum TLS Protocol Version.
if t.Query().Has(TlsUriParamMaxTls) {
f.MaxTlsProtocol = new(string)
*f.MaxTlsProtocol = t.Query().Get(TlsUriParamMaxTls)
}

// Curves.
if t.Query().Has(TlsUriParamCurve) {
f.Curves = params[TlsUriParamCurve]
}

tlsFlat = &f

return
}

// ToURL returns the *url.URL representation of a TlsUri. Note that the params will remain, so remove them explicitly if needed.
func (t *TlsUri) ToURL() (u *url.URL) {

if t == nil {
return
}

u = t.URL

return
}

View File

@ -1,30 +0,0 @@
package cryptparse

import (
`encoding/xml`
`net/url`
)

// TlsFlat provides an easy structure to marshal/unmarshal a tls.Config from/to a data structure (JSON, XML, etc.).
type TlsFlat struct {
XMLName xml.Name `xml:"tlsConfig" json:"-" yaml:"-" toml:"-"`
SniName string `json:"sni_name" xml:"sniName,attr" yaml:"SniName" toml:"SniName" required:"true" validate:"required"`
SkipVerify bool `json:"skip_verify,omitempty" xml:"skipVerify,attr,omitempty" yaml:"SkipVerify,omitempty" toml:"SkipVerify,omitempty"`
Certs []*TlsFlatCert `json:"certs,omitempty" xml:"certs>cert,omitempty" yaml:"Certs,omitempty" toml:"Certs,omitempty" validate:"omitempty,dive"`
CaFiles []string `json:"ca_files,omitempty" xml:"roots>ca,omitempty" yaml:"CaFiles,omitempty" toml:"CaFiles,omitempty" validate:"omitempty,dive,filepath"`
CipherSuites []string `json:"cipher_suites,omitempty" xml:"ciphers,omitempty" yaml:"CipherSuites,omitempty" toml:"CipherSuites,omitempty"`
MinTlsProtocol *string `json:"min_tls_protocol,omitempty" xml:"minTlsProtocol,attr,omitempty" yaml:"MinTlsProtocol,omitempty" toml:"MinTlsProtocol,omitempty"`
MaxTlsProtocol *string `json:"max_tls_protocol,omitempty" xml:"maxTlsProtocol,attr,omitempty" yaml:"MaxTlsProtocol,omitempty" toml:"MaxTlsProtocol,omitempty"`
Curves []string `json:"curves,omitempty" xml:"curves>curve,omitempty" yaml:"Curves,omitempty" toml:"Curves,omitempty" validate:"omitempty,dive"`
}

// TlsFlatCert represents a certificate (and, possibly, paired key).
type TlsFlatCert struct {
XMLName xml.Name `xml:"cert" json:"-" yaml:"-" toml:"-"`
KeyFile *string `json:"key,omitempty" xml:"key,attr,omitempty" yaml:"Key,omitempty" toml:"Key,omitempty" validate:"omitempty,filepath"`
CertFile string `json:"cert" xml:",chardata" yaml:"Certificate" toml:"Certificate" required:"true" validate:"required,filepath"`
}

type TlsUri struct {
*url.URL
}
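
Since TlsFlat exists specifically to round-trip through JSON/XML/YAML/TOML, a short sketch of unmarshalling a (hypothetical) JSON document and turning it into a tls.Config:

package cryptparse

import (
	"crypto/tls"
	"encoding/json"
)

// Illustrative sketch: the keys follow the json struct tags above; the file
// paths are hypothetical and must exist for ToTlsConfig to succeed.
func exampleTlsFlatFromJson() (cfg *tls.Config, err error) {

	var flat TlsFlat

	raw := []byte(`{
		"sni_name": "db.internal.example.com",
		"ca_files": ["/etc/ssl/my-ca.pem"],
		"certs": [{"cert": "/etc/ssl/client.pem", "key": "/etc/ssl/client.key"}],
		"min_tls_protocol": "TLS_1.3"
	}`)

	if err = json.Unmarshal(raw, &flat); err != nil {
		return
	}

	// ToTlsConfig calls Normalize and validates the struct before building the config.
	cfg, err = flat.ToTlsConfig()

	return
}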

View File

@ -159,7 +159,7 @@ func GetPidEnvMap(pid uint32) (envMap map[string]string, err error) {
var procPath string
var exists bool

envMap = make(map[string]string, 0)
envMap = make(map[string]string)

procPath = fmt.Sprintf("/proc/%v/environ", pid)


View File

@ -13,7 +13,7 @@ func envListToMap(envs []string) (envMap map[string]string) {
var kv []string
var k, v string

envMap = make(map[string]string, 0)
envMap = make(map[string]string)

for _, ev := range envs {
kv = strings.SplitN(ev, "=", 2)

3
fsutils/TODO Normal file
View File

@ -0,0 +1,3 @@
- XATTRS
(see FS_XFLAG_* in fs.h, FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR)
- fs label, UUID? (fs.h)

View File

@ -1,101 +1,36 @@
package fsutils

import (
`github.com/g0rbe/go-chattr`
)

// https://github.com/torvalds/linux/blob/master/include/uapi/linux/fs.h
const (
SecureDelete uint32 = chattr.FS_SECRM_FL // Secure deletion
UnDelete = chattr.FS_UNRM_FL // Undelete
CompressFile = chattr.FS_COMPR_FL // Compress file
SyncUpdatechattr = chattr.FS_SYNC_FL // Synchronous updates
Immutable = chattr.FS_IMMUTABLE_FL // Immutable file
AppendOnly = chattr.FS_APPEND_FL // Writes to file may only append
NoDumpFile = chattr.FS_NODUMP_FL // Do not dump file
NoUpdateAtime = chattr.FS_NOATIME_FL // Do not update atime
IsDirty = chattr.FS_DIRTY_FL // Nobody knows what this does, lol.
CompressedClusters = chattr.FS_COMPRBLK_FL // One or more compressed clusters
NoCompress = chattr.FS_NOCOMP_FL // Don't compress
EncFile = chattr.FS_ENCRYPT_FL // Encrypted file
BtreeFmt = chattr.FS_BTREE_FL // Btree format dir
HashIdxDir = chattr.FS_INDEX_FL // Hash-indexed directory
AfsDir = chattr.FS_IMAGIC_FL // AFS directory
ReservedExt3 = chattr.FS_JOURNAL_DATA_FL // Reserved for ext3
NoMergeTail = chattr.FS_NOTAIL_FL // File tail should not be merged
DirSync = chattr.FS_DIRSYNC_FL // dirsync behaviour (directories only)
DirTop = chattr.FS_TOPDIR_FL // Top of directory hierarchies
ReservedExt4a = chattr.FS_HUGE_FILE_FL // Reserved for ext4
Extents = chattr.FS_EXTENT_FL // Extents
LargeEaInode = chattr.FS_EA_INODE_FL // Inode used for large EA
ReservedExt4b = chattr.FS_EOFBLOCKS_FL // Reserved for ext4
NoCOWFile = chattr.FS_NOCOW_FL // Do not cow file
ReservedExt4c = chattr.FS_INLINE_DATA_FL // Reserved for ext4
UseParentProjId = chattr.FS_PROJINHERIT_FL // Create with parents projid
ReservedExt2 = chattr.FS_RESERVED_FL // Reserved for ext2 lib
)

var (
// AttrNameValueMap contains a mapping of attribute names (as designated by this package) to their flag values.
AttrNameValueMap map[string]uint32 = map[string]uint32{
"SecureDelete": SecureDelete,
"UnDelete": UnDelete,
"CompressFile": CompressFile,
"SyncUpdatechattr": SyncUpdatechattr,
"Immutable": Immutable,
"AppendOnly": AppendOnly,
"NoDumpFile": NoDumpFile,
"NoUpdateAtime": NoUpdateAtime,
"IsDirty": IsDirty,
"CompressedClusters": CompressedClusters,
"NoCompress": NoCompress,
"EncFile": EncFile,
"BtreeFmt": BtreeFmt,
"HashIdxDir": HashIdxDir,
"AfsDir": AfsDir,
"ReservedExt3": ReservedExt3,
"NoMergeTail": NoMergeTail,
"DirSync": DirSync,
"DirTop": DirTop,
"ReservedExt4a": ReservedExt4a,
"Extents": Extents,
"LargeEaInode": LargeEaInode,
"ReservedExt4b": ReservedExt4b,
"NoCOWFile": NoCOWFile,
"ReservedExt4c": ReservedExt4c,
"UseParentProjId": UseParentProjId,
"ReservedExt2": ReservedExt2,
}
/*
AttrValueNameMap contains a mapping of attribute flags to their names (as designated by this package).
Note the oddball here, BtreeFmt and HashIdxDir are actually the same value, so be forewarned.
linuxFsAttrsListOrder defines the order the attributes are printed in per e2fsprogs.

See flags_name at https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/e2p/pf.c for order.
Up to date as of e2fsprogs v1.47.1, Linux 6.12-rc7.

The below are the struct field names for easier reflection.
*/
AttrValueNameMap map[uint32]string = map[uint32]string{
SecureDelete: "SecureDelete",
UnDelete: "UnDelete",
CompressFile: "CompressFile",
SyncUpdatechattr: "SyncUpdatechattr",
Immutable: "Immutable",
AppendOnly: "AppendOnly",
NoDumpFile: "NoDumpFile",
NoUpdateAtime: "NoUpdateAtime",
IsDirty: "IsDirty",
CompressedClusters: "CompressedClusters",
NoCompress: "NoCompress",
EncFile: "EncFile",
BtreeFmt: "BtreeFmt|HashIdxDir", // Well THIS is silly and seems like an oversight. Both FS_BTREE_FL and FS_INDEX_FL have the same flag. Confirmed in kernel source.
AfsDir: "AfsDir",
ReservedExt3: "ReservedExt3",
NoMergeTail: "NoMergeTail",
DirSync: "DirSync",
DirTop: "DirTop",
ReservedExt4a: "ReservedExt4a",
Extents: "Extents",
LargeEaInode: "LargeEaInode",
ReservedExt4b: "ReservedExt4b",
NoCOWFile: "NoCOWFile",
ReservedExt4c: "ReservedExt4c",
UseParentProjId: "UseParentProjId",
ReservedExt2: "ReservedExt2",
linuxFsAttrsListOrder []string = []string{
"SecureDelete",
"UnDelete",
"SyncUpdate",
"DirSync",
"Immutable",
"AppendOnly",
"NoDumpFile",
"NoUpdateAtime",
"CompressFile",
"EncFile",
"ReservedExt3",
"HashIdxDir",
"NoMergeTail",
"DirTop",
"Extents",
"NoCOWFile",
"DAX",
"CaseInsensitive",
"ReservedExt4c",
"UseParentProjId",
"VerityProtected",
"NoCompress",
}
)

127
fsutils/consts_lin.go Normal file
View File

@ -0,0 +1,127 @@
package fsutils

/*
https://github.com/torvalds/linux/blob/master/include/uapi/linux/fs.h "Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)"
Up to date as of Linux 6.12-rc7.
*/
const (
SecureDelete fsAttr = 1 << iota // Secure deletion
UnDelete // Undelete
CompressFile // Compress file
SyncUpdate // Synchronous updates
Immutable // Immutable file
AppendOnly // Writes to file may only append
NoDumpFile // Do not dump file
NoUpdateAtime // Do not update atime
IsDirty // Nobody knows what this does, lol.
CompressedClusters // One or more compressed clusters
NoCompress // Don't compress
EncFile // Encrypted file
BtreeFmt // Btree format dir
AfsDir // AFS directory
ReservedExt3 // Reserved for ext3
NoMergeTail // File tail should not be merged
DirSync // dirsync behaviour (directories only)
DirTop // Top of directory hierarchies
ReservedExt4a // Reserved for ext4
Extents // Extents
VerityProtected // Verity-protected inode
LargeEaInode // Inode used for large EA
ReservedExt4b // Reserved for ext4
NoCOWFile // Do not cow file
_ // (Unused)
DAX // Inode is DAX
_ // (Unused)
_ // (Unused)
ReservedExt4c // Reserved for ext4
UseParentProjId // Create with parents projid
CaseInsensitive // Folder is case-insensitive
ReservedExt2 // Reserved for ext2 lib
)

// These are the same value. For some reason.
const (
HashIdxDir fsAttr = BtreeFmt // Hash-indexed directory
)

var (
// AttrNameValueMap contains a mapping of attribute names (as designated by this package) to their flag values.
AttrNameValueMap map[string]fsAttr = map[string]fsAttr{
"SecureDelete": SecureDelete,
"UnDelete": UnDelete,
"CompressFile": CompressFile,
"SyncUpdate": SyncUpdate,
"Immutable": Immutable,
"AppendOnly": AppendOnly,
"NoDumpFile": NoDumpFile,
"NoUpdateAtime": NoUpdateAtime,
"IsDirty": IsDirty,
"CompressedClusters": CompressedClusters,
"NoCompress": NoCompress,
"EncFile": EncFile,
"BtreeFmt": BtreeFmt,
"HashIdxDir": HashIdxDir,
"AfsDir": AfsDir,
"ReservedExt3": ReservedExt3,
"NoMergeTail": NoMergeTail,
"DirSync": DirSync,
"DirTop": DirTop,
"ReservedExt4a": ReservedExt4a,
"Extents": Extents,
"VerityProtected": VerityProtected,
"LargeEaInode": LargeEaInode,
"ReservedExt4b": ReservedExt4b,
"NoCOWFile": NoCOWFile,
"DAX": DAX,
"ReservedExt4c": ReservedExt4c,
"UseParentProjId": UseParentProjId,
"CaseInsensitive": CaseInsensitive,
"ReservedExt2": ReservedExt2,
}

/*
AttrValueNameMap contains a mapping of attribute flags to their names (as designated by this package).
Note the oddball here, BtreeFmt and HashIdxDir are actually the same value, so their string value is unpredictable.
*/
AttrValueNameMap map[fsAttr]string = invertMap(AttrNameValueMap)

// KernelNameValueMap allows lookups using the symbol name as used in the Linux kernel source.
KernelNameValueMap map[string]fsAttr = map[string]fsAttr{
"FS_SECRM_FL": SecureDelete,
"FS_UNRM_FL": UnDelete,
"FS_COMPR_FL": CompressFile,
"FS_SYNC_FL": SyncUpdate,
"FS_IMMUTABLE_FL": Immutable,
"FS_APPEND_FL": AppendOnly,
"FS_NODUMP_FL": NoDumpFile,
"FS_NOATIME_FL": NoUpdateAtime,
"FS_DIRTY_FL": IsDirty,
"FS_COMPRBLK_FL": CompressedClusters,
"FS_NOCOMP_FL": NoCompress,
"FS_ENCRYPT_FL": EncFile,
"FS_BTREE_FL": BtreeFmt,
"FS_INDEX_FL": HashIdxDir,
"FS_IMAGIC_FL": AfsDir,
"FS_JOURNAL_DATA_FL": ReservedExt3,
"FS_NOTAIL_FL": NoMergeTail,
"FS_DIRSYNC_FL": DirSync,
"FS_TOPDIR_FL": DirTop,
"FS_HUGE_FILE_FL": ReservedExt4a,
"FS_EXTENT_FL": Extents,
"FS_VERITY_FL": VerityProtected,
"FS_EA_INODE_FL": LargeEaInode,
"FS_EOFBLOCKS_FL": ReservedExt4b,
"FS_NOCOW_FL": NoCOWFile,
"FS_DAX_FL": DAX,
"FS_INLINE_DATA_FL": ReservedExt4c,
"FS_PROJINHERIT_FL": UseParentProjId,
"FS_CASEFOLD_FL": CaseInsensitive,
"FS_RESERVED_FL": ReservedExt2,
}

/*
KernelValueNameMap contains a mapping of attribute flags to their kernel source symbol name.
Note the oddball here, BtreeFmt and HashIdxDir are actually the same value, so their string value is unpredictable.
*/
KernelValueNameMap map[fsAttr]string = invertMap(KernelNameValueMap)
)
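
As a usage note (not part of the diff), the lookup maps allow round-tripping between this package's flag names, the raw flag values, and the kernel's FS_*_FL symbols:

package fsutils

import (
	"fmt"
)

// Illustrative sketch: round-trip a few flags through the lookup maps.
// (BtreeFmt/HashIdxDir are avoided here; they share a value, so their
// reverse lookups are unpredictable.)
func exampleAttrLookups() {
	fmt.Println(AttrNameValueMap["Immutable"] == Immutable)         // true
	fmt.Println(KernelNameValueMap["FS_IMMUTABLE_FL"] == Immutable) // true
	fmt.Println(KernelValueNameMap[DAX])                            // "FS_DAX_FL"
	fmt.Println(AttrValueNameMap[NoCOWFile])                        // "NoCOWFile"
}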

7
fsutils/doc.go Normal file
View File

@ -0,0 +1,7 @@
/*
fsutils is a collection of filesystem-related functions, types, etc.

Currently it's only a (fixed/actually working) reimplementation of github.com/g0rbe/go-chattr.
(Note to library maintainers, if someone reports an integer overflow and even tells you how to fix it, you should probably fix it.)
*/
package fsutils

11
fsutils/errs.go Normal file
View File

@ -0,0 +1,11 @@
package fsutils

import (
`syscall`
)

var (
// Yes, I know. "Why ENOTTY?" I don't know, ask Linus.
// If you see "inappropriate ioctl for device", it's this'un.
ErrFsAttrsUnsupported error = syscall.ENOTTY
)
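
A short, hedged sketch of how a caller might surface this error (GetAttrs is defined later in this diff; whether the errno arrives unwrapped is an assumption here):

package fsutils

import (
	"errors"
	"log"
)

// Illustrative sketch: distinguish "filesystem does not support inode flags"
// from genuine failures when reading attributes.
func exampleAttrErrCheck(path string) {
	if _, err := GetAttrs(path); err != nil {
		if errors.Is(err, ErrFsAttrsUnsupported) {
			log.Printf("%s: inode flags not supported here (ENOTTY)", path)
			return
		}
		log.Fatal(err)
	}
}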

View File

@ -1,44 +1,16 @@
package fsutils

import (
`os`
`reflect`
// invertMap inverts a name-to-flag map into a flag-to-name map for easier lookups.
func invertMap(origMap map[string]fsAttr) (newMap map[fsAttr]string) {

`github.com/g0rbe/go-chattr`
`r00t2.io/sysutils/paths`
)

func GetAttrs(path string) (attrs *FsAttrs, err error) {

var f *os.File
var evalAttrs FsAttrs
var attrVal uint32
var reflectVal reflect.Value
var field reflect.Value
var myPath string = path

if err = paths.RealPath(&myPath); err != nil {
if origMap == nil {
return
}
newMap = make(map[fsAttr]string)

if f, err = os.Open(myPath); err != nil {
return
for k, v := range origMap {
newMap[v] = k
}
defer f.Close()

reflectVal = reflect.ValueOf(&evalAttrs).Elem()

if attrVal, err = chattr.GetAttrs(f); err != nil {
return
}

for attrNm, attrInt := range AttrNameValueMap {
field = reflectVal.FieldByName(attrNm)
field.SetBool((attrVal & attrInt) != 0)
}

attrs = new(FsAttrs)
*attrs = evalAttrs

return
}

View File

@ -1,43 +1,96 @@
package fsutils

import (
`os`
`reflect`

`github.com/g0rbe/go-chattr`
`r00t2.io/sysutils/paths`
`strings`
)

func (f *FsAttrs) Apply(path string) (err error) {
/*
String returns a string representation (comparable to lsattr(1)) of an FsAttrs.

var file *os.File
var reflectVal reflect.Value
Not all flags are represented, as this aims for compatibility with e2fsprogs/lsattr output.
*/
func (f *FsAttrs) String() (s string) {

// Flags have their short name printed if set, otherwise a '-' placeholder is used.
// https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/e2p/pf.c

var refType reflect.Type
var refVal reflect.Value
var refField reflect.StructField
var fieldVal reflect.Value
var tagVal string
var sb strings.Builder

var myPath string = path

if err = paths.RealPath(&myPath); err != nil {
if f == nil {
s = strings.Repeat("-", len(linuxFsAttrsListOrder))
return
}
if file, err = os.Open(myPath); err != nil {
return
}
defer file.Close()

reflectVal = reflect.ValueOf(*f)

for attrNm, attrVal := range AttrNameValueMap {
fieldVal = reflectVal.FieldByName(attrNm)
refVal = reflect.ValueOf(*f)
refType = refVal.Type()
for _, fn := range linuxFsAttrsListOrder {
refField, _ = refType.FieldByName(fn)
tagVal = refField.Tag.Get("fsAttrShort")
if tagVal == "" || tagVal == "-" {
continue
}
fieldVal = refVal.FieldByName(fn)
if fieldVal.Bool() {
if err = chattr.SetAttr(file, attrVal); err != nil {
return
}
sb.WriteString(tagVal)
} else {
if err = chattr.UnsetAttr(file, attrVal); err != nil {
return
}
sb.WriteString("-")
}
}

s = sb.String()

return
}

/*
StringLong returns a more extensive/"human-friendly" representation (comparable to lsattr(1) with -l) of an FsAttrs.

Not all flags are represented, as this aims for compatibility with e2fsprogs/lsattr output.
*/
func (f *FsAttrs) StringLong() (s string) {

// The long names are separated by a comma and a space.
// If no attrs are set, the string "---" is used.
// https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/e2p/pf.c

var refType reflect.Type
var refVal reflect.Value
var refField reflect.StructField
var fieldVal reflect.Value
var tagVal string
var out []string

if f == nil {
s = strings.Repeat("-", 3)
return
}

refVal = reflect.ValueOf(*f)
refType = refVal.Type()
for _, fn := range linuxFsAttrsListOrder {
refField, _ = refType.FieldByName(fn)
tagVal = refField.Tag.Get("fsAttrLong")
if tagVal == "" || tagVal == "-" {
continue
}
fieldVal = refVal.FieldByName(fn)
if fieldVal.Bool() {
out = append(out, tagVal)
}
}

if len(out) == 0 {
s = strings.Repeat("-", 3)
return
}

s = strings.Join(out, ", ")

return
}
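A sketch contrasting the two representations on a hand-built value; the exact column order of the short form comes from the unexported linuxFsAttrsListOrder, so the comments are only indicative:

package main

import (
    "fmt"

    "r00t2.io/sysutils/fsutils"
)

func main() {
    attrs := &fsutils.FsAttrs{
        Immutable:     true,
        NoUpdateAtime: true,
    }

    fmt.Println(attrs.String())     // '-' placeholders with 'i' and 'A' set (order per lsattr)
    fmt.Println(attrs.StringLong()) // e.g. "Immutable, No_Atime"
}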

View File

@ -0,0 +1,46 @@
//go:build linux

package fsutils

import (
`os`
`reflect`

`r00t2.io/sysutils/paths`
)

func (f *FsAttrs) Apply(path string) (err error) {

var file *os.File
var reflectVal reflect.Value
var fieldVal reflect.Value

if f == nil {
return
}

if err = paths.RealPath(&path); err != nil {
return
}
if file, err = os.Open(path); err != nil {
return
}
defer file.Close()

reflectVal = reflect.ValueOf(*f)

for attrNm, attrVal := range AttrNameValueMap {
fieldVal = reflectVal.FieldByName(attrNm)
if fieldVal.Bool() {
if err = setAttrs(file, attrVal); err != nil {
return
}
} else {
if err = unsetAttrs(file, attrVal); err != nil {
return
}
}
}

return
}
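Roughly how the updated test further below exercises this (a sketch; it assumes root privileges, a filesystem supporting these flags, and a hypothetical path):

package main

import (
    "r00t2.io/sysutils/fsutils"
)

func main() {
    var attrs *fsutils.FsAttrs
    var err error

    if attrs, err = fsutils.GetAttrs("testfile"); err != nil { // hypothetical path
        panic(err)
    }

    attrs.SecureDelete = true
    attrs.SyncUpdate = true

    if err = attrs.Apply("testfile"); err != nil {
        panic(err)
    }
}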

134
fsutils/funcs_linux.go Normal file
View File

@ -0,0 +1,134 @@
//go:build linux

package fsutils

import (
`os`
`reflect`
`unsafe`

`golang.org/x/sys/unix`
`r00t2.io/goutils/bitmask`
`r00t2.io/sysutils/paths`
)

func GetAttrs(path string) (attrs *FsAttrs, err error) {

var f *os.File
var evalAttrs FsAttrs
var attrVal fsAttr
var attrValBit bitmask.MaskBit
var reflectVal reflect.Value
var field reflect.Value
var myPath string = path

if err = paths.RealPath(&myPath); err != nil {
return
}

if f, err = os.Open(myPath); err != nil {
return
}
defer f.Close()

reflectVal = reflect.ValueOf(&evalAttrs).Elem()

if attrVal, err = getAttrs(f); err != nil {
return
}
attrValBit = bitmask.MaskBit(attrVal)

for attrNm, attrInt := range AttrNameValueMap {
field = reflectVal.FieldByName(attrNm)
field.SetBool(attrValBit.HasFlag(bitmask.MaskBit(attrInt)))
}

attrs = new(FsAttrs)
*attrs = evalAttrs

return
}

// getAttrs is the unexported low-level syscall to get attributes.
func getAttrs(f *os.File) (attrVal fsAttr, err error) {

var u uint
var curFlags int
// var errNo syscall.Errno

/*
if _, _, errNo = unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.FS_IOC_GETFLAGS, uintptr(unsafe.Pointer(&curFlags))); errNo != 0 {
err = os.NewSyscallError("ioctl: FS_IOC_GETFLAGS", errNo)
return
}
*/
if curFlags, err = unix.IoctlGetInt(int(f.Fd()), unix.FS_IOC_GETFLAGS); err != nil {
return
}

u = uint(curFlags)

attrVal = fsAttr(u)

return
}

// setAttrs is the unexported low-level syscall to set attributes. attrs may be OR'd.
func setAttrs(f *os.File, attrs fsAttr) (err error) {

var curAttrs fsAttr
var ab bitmask.MaskBit
var errNo unix.Errno
var val uint

if curAttrs, err = getAttrs(f); err != nil {
return
}
ab = bitmask.MaskBit(curAttrs)

if ab.HasFlag(bitmask.MaskBit(attrs)) {
return
}

ab.AddFlag(bitmask.MaskBit(attrs))

val = ab.Value()

/*
if err = unix.IoctlSetInt(int(f.Fd()), unix.FS_IOC_SETFLAGS, int(ab.Value())); err != nil {
return
}
*/
if _, _, errNo = unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.FS_IOC_SETFLAGS, uintptr(unsafe.Pointer(&val))); errNo != 0 {
err = os.NewSyscallError("ioctl: SYS_IOCTL", errNo)
return
}

return
}

// unsetAttrs is the unexported low-level syscall to remove attributes. attrs may be OR'd.
func unsetAttrs(f *os.File, attrs fsAttr) (err error) {

var curAttrs fsAttr
var ab bitmask.MaskBit
var errNo unix.Errno
var val uint

if curAttrs, err = getAttrs(f); err != nil {
return
}
ab = bitmask.MaskBit(curAttrs)

if !ab.HasFlag(bitmask.MaskBit(attrs)) {
return
}

ab.ClearFlag(bitmask.MaskBit(attrs))

val = ab.Value()

/*
if err = unix.IoctlSetInt(int(f.Fd()), unix.FS_IOC_SETFLAGS, int(ab.Value())); err != nil {
return
}
*/
if _, _, errNo = unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.FS_IOC_SETFLAGS, uintptr(unsafe.Pointer(&val))); errNo != 0 {
err = os.NewSyscallError("ioctl: SYS_IOCTL", errNo)
return
}

return
}

View File

@ -1,3 +1,5 @@
//go:build linux

package fsutils

import (
@ -7,12 +9,13 @@ import (
`os/user`
`testing`

`github.com/davecgh/go-spew/spew`
`r00t2.io/sysutils/paths`
)

var (
testFilename string = "testfile"
testErrBadUser error = errors.New("test must be run as root, on Linux")
testErrBadUser error = errors.New("test must be run as root")
)

func testChkUser() (err error) {
@ -36,12 +39,18 @@ func TestSetAttrs(t *testing.T) {
if attrs, err = GetAttrs(testFilename); err != nil {
t.Fatalf("Failed to get attrs for %v: %v", testFilename, err)
}
t.Logf("Attrs for %v:\n%#v", testFilename, attrs)
t.Logf("Attrs for %v (before):\n%s", testFilename, spew.Sdump(attrs))
attrs.CompressFile = true
attrs.SyncUpdate = true
attrs.SecureDelete = true
if err = attrs.Apply(testFilename); err != nil {
t.Fatalf("Failed to apply attrs to %v: %v", testFilename, err)
}
t.Logf("Applied new attrs to %v:\n%#v", testFilename, attrs)
if attrs, err = GetAttrs(testFilename); err != nil {
t.Fatalf("Failed to get attrs for %v: %v", testFilename, err)
}
t.Logf("Attrs for %v (after):\n%s", testFilename, spew.Sdump(attrs))
}

func TestMain(t *testing.M) {

View File

@ -1,32 +1,44 @@
package fsutils

// FsAttrs is a convenience struct around github.com/g0rbe/go-chattr.
import (
`r00t2.io/goutils/bitmask`
)

type fsAttr bitmask.MaskBit

/*
FsAttrs is a struct representation of filesystem attributes on Linux.
Up to date as of Linux 6.12-rc7.
*/
type FsAttrs struct {
SecureDelete bool
UnDelete bool
CompressFile bool
SyncUpdate bool
Immutable bool
AppendOnly bool
NoDumpFile bool
NoUpdateAtime bool
IsDirty bool
CompressedClusters bool
NoCompress bool
EncFile bool
BtreeFmt bool
HashIdxDir bool
AfsDir bool
ReservedExt3 bool
NoMergeTail bool
DirSync bool
DirTop bool
ReservedExt4a bool
Extents bool
LargeEaInode bool
ReservedExt4b bool
NoCOWFile bool
ReservedExt4c bool
UseParentProjId bool
ReservedExt2 bool
SecureDelete bool `fsAttrShort:"s" fsAttrLong:"Secure_Deletion" fsAttrKern:"FS_SECRM_FL" json:"secure_delete" toml:"SecureDelete" yaml:"Secure Delete" xml:"secureDelete,attr"`
UnDelete bool `fsAttrShort:"u" fsAttrLong:"Undelete" fsAttrKern:"FS_UNRM_FL" json:"undelete" toml:"Undelete" yaml:"Undelete" xml:"undelete,attr"`
CompressFile bool `fsAttrShort:"c" fsAttrLong:"Compression_Requested" fsAttrKern:"FS_COMPR_FL" json:"compress" toml:"Compress" yaml:"Compress" xml:"compress,attr"`
SyncUpdate bool `fsAttrShort:"S" fsAttrLong:"Synchronous_Updates" fsAttrKern:"FS_SYNC_FL" json:"sync" toml:"SyncUpdate" yaml:"Synchronized Update" xml:"syncUpdate,attr"`
Immutable bool `fsAttrShort:"i" fsAttrLong:"Immutable" fsAttrKern:"FS_IMMUTABLE_FL" json:"immutable" toml:"Immutable" yaml:"Immutable" xml:"immutable,attr"`
AppendOnly bool `fsAttrShort:"a" fsAttrLong:"Append_Only" fsAttrKern:"FS_APPEND_FL" json:"append_only" toml:"AppendOnly" yaml:"Append Only" xml:"appendOnly,attr"`
NoDumpFile bool `fsAttrShort:"d" fsAttrLong:"No_Dump" fsAttrKern:"FS_NODUMP_FL" json:"no_dump" toml:"NoDump" yaml:"Disable Dumping" xml:"noDump,attr"`
NoUpdateAtime bool `fsAttrShort:"A" fsAttrLong:"No_Atime" fsAttrKern:"FS_NOATIME_FL" json:"no_atime" toml:"DisableAtime" yaml:"Disable Atime Updating" xml:"noAtime,attr"`
IsDirty bool `fsAttrShort:"-" fsAttrLong:"-" fsAttrKern:"FS_DIRTY_FL" json:"dirty" toml:"Dirty" yaml:"Dirty" xml:"dirty,attr"`
CompressedClusters bool `fsAttrShort:"-" fsAttrLong:"-" fsAttrKern:"FS_COMPRBLK_FL" json:"compress_clst" toml:"CompressedClusters" yaml:"Compressed Clusters" xml:"compressClst,attr"`
NoCompress bool `fsAttrShort:"m" fsAttrLong:"Dont_Compress" fsAttrKern:"FS_NOCOMP_FL" json:"no_compress" toml:"DisableCompression" yaml:"Disable Compression" xml:"noCompress,attr"`
EncFile bool `fsAttrShort:"E" fsAttrLong:"Encrypted" fsAttrKern:"FS_ENCRYPT_FL" json:"enc" toml:"Encrypted" yaml:"Encrypted" xml:"enc,attr"`
BtreeFmt bool `fsAttrShort:"-" fsAttrLong:"-" fsAttrKern:"FS_BTREE_FL" json:"btree" toml:"Btree" yaml:"Btree" xml:"btree,attr"`
HashIdxDir bool `fsAttrShort:"I" fsAttrLong:"Indexed_directory" fsAttrKern:"FS_INDEX_FL" json:"idx_dir" toml:"IdxDir" yaml:"Indexed Directory" xml:"idxDir,attr"`
AfsDir bool `fsAttrShort:"-" fsAttrLong:"-" fsAttrKern:"FS_IMAGIC_FL" json:"afs" toml:"AFS" yaml:"AFS" xml:"afs,attr"`
ReservedExt3 bool `fsAttrShort:"j" fsAttrLong:"Journaled_Data" fsAttrKern:"FS_JOURNAL_DATA_FL" json:"res_ext3" toml:"ReservedExt3" yaml:"Reserved Ext3" xml:"resExt3,attr"`
NoMergeTail bool `fsAttrShort:"t" fsAttrLong:"No_Tailmerging" fsAttrKern:"FS_NOTAIL_FL" json:"no_merge_tail" toml:"DisableTailmerging" yaml:"Disable Tailmerging" xml:"noMergeTail,attr"`
DirSync bool `fsAttrShort:"D" fsAttrLong:"Synchronous_Directory_Updates" fsAttrKern:"FS_DIRSYNC_FL" json:"dir_sync" toml:"DirSync" yaml:"Synchronized Directory Updates" xml:"dirSync,attr"`
DirTop bool `fsAttrShort:"T" fsAttrLong:"Top_of_Directory_Hierarchies" fsAttrKern:"FS_TOPDIR_FL" json:"dir_top" toml:"DirTop" yaml:"Top of Directory Hierarchies" xml:"dirTop,attr"`
ReservedExt4a bool `fsAttrShort:"-" fsAttrLong:"-" fsAttrKern:"FS_HUGE_FILE_FL" json:"res_ext4a" toml:"ReservedExt4A" yaml:"Reserved Ext4 A" xml:"resExt4a,attr"`
Extents bool `fsAttrShort:"e" fsAttrLong:"Extents" fsAttrKern:"FS_EXTENT_FL" json:"extents" toml:"Extents" yaml:"Extents" xml:"extents,attr"`
VerityProtected bool `fsAttrShort:"V" fsAttrLong:"Verity" fsAttrKern:"FS_VERITY_FL" json:"verity" toml:"Verity" yaml:"Verity Protected" xml:"verity,attr"`
LargeEaInode bool `fsAttrShort:"-" fsAttrLong:"-" fsAttrKern:"FS_EA_INODE_FL" json:"ea" toml:"EAInode" yaml:"EA Inode" xml:"ea,attr"`
ReservedExt4b bool `fsAttrShort:"-" fsAttrLong:"-" fsAttrKern:"FS_EOFBLOCKS_FL" json:"res_ext4b" toml:"ReservedExt4B" yaml:"Reserved Ext4 B" xml:"resExt4b,attr"`
NoCOWFile bool `fsAttrShort:"C" fsAttrLong:"No_COW" fsAttrKern:"FS_NOCOW_FL" json:"no_cow" toml:"NoCOW" yaml:"Disable COW" xml:"noCOW,attr"`
DAX bool `fsAttrShort:"x" fsAttrLong:"DAX" fsAttrKern:"FS_DAX_FL" json:"dax" toml:"DAX" yaml:"DAX" xml:"DAX,attr"`
ReservedExt4c bool `fsAttrShort:"N" fsAttrLong:"Inline_Data" fsAttrKern:"FS_INLINE_DATA_FL" json:"res_ext4c" toml:"ReservedExt4C" yaml:"Reserved Ext4 C" xml:"resExt4c,attr"`
UseParentProjId bool `fsAttrShort:"P" fsAttrLong:"Project_Hierarchy" fsAttrKern:"FS_PROJINHERIT_FL" json:"parent_proj_id" toml:"ParentProjId" yaml:"Use Parent Project ID" xml:"parentProjId,attr"`
CaseInsensitive bool `fsAttrShort:"F" fsAttrLong:"Casefold" fsAttrKern:"FS_CASEFOLD_FL" json:"case_ins" toml:"CaseInsensitive" yaml:"Case Insensitive" xml:"caseIns,attr"`
ReservedExt2 bool `fsAttrShort:"-" fsAttrLong:"-" fsAttrKern:"FS_RESERVED_FL" json:"res_ext2" toml:"ReservedExt2" yaml:"Reserved Ext2" xml:"resExt2,attr"`
}
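Since the struct now carries serialization tags, a brief sketch of what they enable (the chosen fields are arbitrary):

package main

import (
    "encoding/json"
    "fmt"

    "r00t2.io/sysutils/fsutils"
)

func main() {
    attrs := fsutils.FsAttrs{AppendOnly: true, NoDumpFile: true}

    b, err := json.Marshal(attrs)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b)) // {"secure_delete":false,...,"append_only":true,...}
}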

23
go.mod
View File

@ -1,26 +1,13 @@
module r00t2.io/sysutils

go 1.21
go 1.23.2

require (
github.com/davecgh/go-spew v1.1.1
github.com/g0rbe/go-chattr v1.0.1
github.com/go-playground/validator/v10 v10.22.0
github.com/djherbis/times v1.6.0
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
golang.org/x/sys v0.19.0
golang.org/x/sync v0.9.0
golang.org/x/sys v0.26.0
honnef.co/go/augeas v0.0.0-20161110001225-ca62e35ed6b8
r00t2.io/goutils v1.6.0
r00t2.io/goutils v1.7.1
)

require (
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/net v0.21.0 // indirect
golang.org/x/text v0.14.0 // indirect
)

// Pending https://github.com/g0rbe/go-chattr/pull/3
replace github.com/g0rbe/go-chattr => github.com/johnnybubonic/go-chattr v0.0.0-20240126141003-459f46177b13

39
go.sum
View File

@ -1,40 +1,19 @@
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao=
github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/johnnybubonic/go-chattr v0.0.0-20240126141003-459f46177b13 h1:tgEbuE4bNVjaCWWIB1u9lDzGqH/ZdBTg33+4vNW2rjg=
github.com/johnnybubonic/go-chattr v0.0.0-20240126141003-459f46177b13/go.mod h1:yQc6VPJfpDDC1g+W2t47+yYmzBNioax/GLiyJ25/IOs=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
honnef.co/go/augeas v0.0.0-20161110001225-ca62e35ed6b8 h1:FW42yWB1sGClqswyHIB68wo0+oPrav1IuQ+Tdy8Qp8E=
honnef.co/go/augeas v0.0.0-20161110001225-ca62e35ed6b8/go.mod h1:44w9OfBSQ9l3o59rc2w3AnABtE44bmtNnRMNC7z+oKE=
r00t2.io/goutils v1.6.0 h1:oBC6PgBv0y/fdHeCmWgORHpBiU8uWw7IfFQJX5rIuzY=
r00t2.io/goutils v1.6.0/go.mod h1:9ObJI9S71wDLTOahwoOPs19DhZVYrOh4LEHmQ8SW4Lk=
r00t2.io/goutils v1.7.1 h1:Yzl9rxX1sR9WT0FcjK60qqOgBoFBOGHYKZVtReVLoQc=
r00t2.io/goutils v1.7.1/go.mod h1:9ObJI9S71wDLTOahwoOPs19DhZVYrOh4LEHmQ8SW4Lk=
r00t2.io/sysutils v1.1.1/go.mod h1:Wlfi1rrJpoKBOjWiYM9rw2FaiZqraD6VpXyiHgoDo/o=

42
paths/consts.go Normal file
View File

@ -0,0 +1,42 @@
package paths

import (
"io/fs"
)

// Mostly just for reference.
const (
// ModeDir | ModeSymlink | ModeNamedPipe | ModeSocket | ModeDevice | ModeCharDevice | ModeIrregular
modeDir pathMode = pathMode(fs.ModeDir)
modeSymlink pathMode = pathMode(fs.ModeSymlink)
modePipe pathMode = pathMode(fs.ModeNamedPipe)
modeSocket pathMode = pathMode(fs.ModeSocket)
modeDev pathMode = pathMode(fs.ModeDevice)
modeCharDev pathMode = pathMode(fs.ModeCharDevice)
modeIrregular pathMode = pathMode(fs.ModeIrregular)
modeAnyExceptRegular pathMode = modeDir | modeSymlink | modePipe | modeSocket | modeDev | modeCharDev | modeIrregular
)

// Miss reasons
const (
MissNoMiss missReason = ""
MissNoMeta missReason = "Could not determine metadata"
MissBadBase missReason = "Base name does not match BasePtrn"
MissBadPath missReason = "Path does not match PathPtrn"
MissBadTime missReason = "Time(s) does not/do not match Age"
MissFile missReason = "Object is a file and NoFiles is set"
MissType missReason = "Object does not match TargetType"
)

// Times
const TimeAny pathTimeType = 0
const (
// TimeAccessed == atime
TimeAccessed pathTimeType = 1 << iota
// TimeCreated == "birth" time (*NOT* ctime! See TimeChanged)
TimeCreated
// TimeChanged == ctime
TimeChanged
// TimeModified == mtime
TimeModified
)
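The time-type constants may be OR'd together; a sketch selecting regular files whose mtime or ctime is older than 30 days (the root is arbitrary, and the walk assumes sufficient read permissions):

package main

import (
    "fmt"
    "time"

    "r00t2.io/sysutils/paths"
)

func main() {
    age := 30 * 24 * time.Hour

    criteria := paths.FsSearchCriteria{
        Root:       "/var/tmp", // arbitrary root
        NoMismatch: true,
        Age:        &age,
        AgeType:    paths.TimeModified | paths.TimeChanged,
        OlderThan:  true,
    }

    found, _, err := paths.SearchFsPaths(criteria)
    if err != nil {
        panic(err)
    }
    for _, res := range found {
        fmt.Println(res.Path)
    }
}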

12
paths/errs.go Normal file
View File

@ -0,0 +1,12 @@
package paths

import (
`errors`
)

var (
ErrNilErrChan error = errors.New("an initialized error channel is required")
ErrNilMatchChan error = errors.New("an initialized matches channel is required")
ErrNilMismatchChan error = errors.New("an initialized mismatches channel is required")
ErrNilWg error = errors.New("a non-nil sync.WaitGroup is required")
)

View File

@ -19,14 +19,22 @@
package paths

import (
`context`
"errors"
"fmt"
"io/fs"
"os"
"os/user"
"path/filepath"
`sort`
"strings"
`sync`
`time`

// "syscall"

`github.com/djherbis/times`
`r00t2.io/goutils/bitmask`
)

/*
@ -266,3 +274,236 @@ func RealPathExistsStat(path *string) (exists bool, stat os.FileInfo, err error)

return
}

// SearchFsPaths gets a file/directory/etc. path list based on the provided criteria.
func SearchFsPaths(matcher FsSearchCriteria) (found, miss []*FsSearchResult, err error) {

var matched *FsSearchResult
var missed *FsSearchResult

if err = RealPath(&matcher.Root); err != nil {
return
}

if err = filepath.WalkDir(
matcher.Root,
func(path string, d fs.DirEntry, inErr error) (outErr error) {

if inErr != nil {
outErr = inErr
return
}

if matched, missed, outErr = matcher.Match(path, d, nil); outErr != nil {
return
}
if matched != nil && !matcher.NoMatch {
found = append(found, matched)
}
if missed != nil && !matcher.NoMismatch {
miss = append(miss, missed)
}

return
},
); err != nil {
return
}

if len(found) == 0 {
return
}

// And sort them.
sort.Slice(
found,
func(i, j int) (isLess bool) {
isLess = found[i].Path < found[j].Path
return
},
)

return
}
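A minimal synchronous sketch (the root and pattern are arbitrary, and the walk assumes sufficient read permissions): collect regular files whose base name ends in .log.

package main

import (
    "fmt"
    "regexp"

    "r00t2.io/sysutils/paths"
)

func main() {
    criteria := paths.FsSearchCriteria{
        Root:       "/var/log",                   // arbitrary root
        NoMismatch: true,                         // only matches are wanted here
        BasePtrn:   regexp.MustCompile(`\.log$`), // applies to the base name
        // TargetType left at 0: regular files only (see FsSearchCriteria docs).
    }

    found, _, err := paths.SearchFsPaths(criteria)
    if err != nil {
        panic(err)
    }

    for _, res := range found {
        fmt.Println(res.Path)
    }
}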

/*
SearchFsPathsAsync is exactly like SearchFsPaths, but dispatches concurrent
workers for the filtering logic instead of filtering iteratively/recursively.
It may be *slightly more* performant in some cases and *slightly less* in others.
Note that unlike SearchFsPaths, the results written to
FsSearchCriteriaAsync.ResChan are not guaranteed to arrive in any predictable order.

All channels are expected to have already been initialized by the caller.
They will not be closed by this function.
*/
func SearchFsPathsAsync(matcher FsSearchCriteriaAsync) {

var err error
var wgLocal sync.WaitGroup
var doneChan chan bool = make(chan bool, 1)

if matcher.ErrChan == nil {
panic(ErrNilErrChan)
return
}

if matcher.WG == nil {
matcher.ErrChan <- ErrNilWg
return
}

defer matcher.WG.Done()

if matcher.ResChan == nil && !matcher.NoMatch {
matcher.ErrChan <- ErrNilMatchChan
return
}
if matcher.MismatchChan == nil && !matcher.NoMismatch {
matcher.ErrChan <- ErrNilMismatchChan
return
}

if err = RealPath(&matcher.Root); err != nil {
matcher.ErrChan <- err
return
}

if matcher.Semaphore != nil && matcher.SemaphoreCtx == nil {
matcher.SemaphoreCtx = context.Background()
}

if err = filepath.WalkDir(
matcher.Root,
func(path string, de fs.DirEntry, inErr error) (outErr error) {

if inErr != nil {
inErr = filterNoFileDir(inErr)
if inErr != nil {
outErr = inErr
return
}
}

if matcher.Semaphore != nil {
if outErr = matcher.Semaphore.Acquire(matcher.SemaphoreCtx, 1); outErr != nil {
return
}
}
wgLocal.Add(1)

go func(p string, d fs.DirEntry) {
var pErr error
var pResMatch *FsSearchResult
var pResMiss *FsSearchResult

defer wgLocal.Done()

if matcher.Semaphore != nil {
defer matcher.Semaphore.Release(1)
}

if pResMatch, pResMiss, pErr = matcher.Match(p, d, nil); pErr != nil {
matcher.ErrChan <- pErr
return
}

if pResMatch != nil && !matcher.NoMatch {
matcher.ResChan <- pResMatch
}
if pResMiss != nil && !matcher.NoMismatch {
matcher.MismatchChan <- pResMiss
}
}(path, de)

return
},
); err != nil {
err = filterNoFileDir(err)
if err != nil {
matcher.ErrChan <- err
return
}
}

go func() {
wgLocal.Wait()
doneChan <- true
}()

<-doneChan

return
}
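A sketch of the asynchronous flow (the root is hypothetical, and the walk assumes sufficient read permissions); the caller owns the channels and the WaitGroup, and closes the channels once the search signals completion:

package main

import (
    "fmt"
    "sync"

    "golang.org/x/sync/semaphore"
    "r00t2.io/sysutils/paths"
)

func main() {
    var wg sync.WaitGroup

    resChan := make(chan *paths.FsSearchResult)
    errChan := make(chan error)

    criteria := paths.FsSearchCriteriaAsync{
        FsSearchCriteria: paths.FsSearchCriteria{
            Root:       "/path/to/search", // hypothetical root
            NoMismatch: true,              // MismatchChan may then be nil
        },
        WG:        &wg,
        ResChan:   resChan,
        ErrChan:   errChan,
        Semaphore: semaphore.NewWeighted(8), // optional; caps concurrent workers
    }

    wg.Add(1) // .Done() is called inside SearchFsPathsAsync
    go paths.SearchFsPathsAsync(criteria)

    // Close the channels once the search finishes; the function never closes them.
    go func() {
        wg.Wait()
        close(resChan)
        close(errChan)
    }()

    go func() {
        for err := range errChan {
            fmt.Println("search error:", err)
        }
    }()

    for res := range resChan {
        fmt.Println(res.Path)
    }
}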

/*
filterTimes checks a times.Timespec of a file using:
* an age specified by the caller
* an ageType bitmask for types of times to compare
* an olderThan bool (if false, the file must be younger than the given age)
* an optional "now" timestamp for the age derivation.
*/
func filterTimes(tspec times.Timespec, age *time.Duration, ageType *pathTimeType, olderThan bool, now *time.Time) (include bool) {

var curAge time.Duration
var mask *bitmask.MaskBit
var tfunc func(t *time.Duration) (match bool) = func(t *time.Duration) (match bool) {
if olderThan {
match = *t > *age
} else {
match = *t < *age
}
return
}

if tspec == nil || age == nil || ageType == nil {
return
}

mask = ageType.Mask()

if now == nil {
now = new(time.Time)
*now = time.Now()
}

// BTIME (if supported)
if tspec.HasBirthTime() && (mask.HasFlag(bitmask.MaskBit(TimeAny)) || mask.HasFlag(bitmask.MaskBit(TimeCreated))) {
curAge = now.Sub(tspec.BirthTime())
if include = tfunc(&curAge); include {
return
}
}
// MTIME
if mask.HasFlag(bitmask.MaskBit(TimeAny)) || mask.HasFlag(bitmask.MaskBit(TimeModified)) {
curAge = now.Sub(tspec.ModTime())
if include = tfunc(&curAge); include {
return
}
}
// CTIME (if supported)
if tspec.HasChangeTime() && (mask.HasFlag(bitmask.MaskBit(TimeAny)) || mask.HasFlag(bitmask.MaskBit(TimeChanged))) {
curAge = now.Sub(tspec.ChangeTime())
if include = tfunc(&curAge); include {
return
}
}
// ATIME
if mask.HasFlag(bitmask.MaskBit(TimeAny)) || mask.HasFlag(bitmask.MaskBit(TimeAccessed)) {
curAge = now.Sub(tspec.AccessTime())
if include = tfunc(&curAge); include {
return
}
}

return
}

func filterNoFileDir(err error) (filtered error) {

filtered = err
if errors.Is(err, fs.ErrNotExist) {
filtered = nil
}

return
}

View File

@ -0,0 +1,125 @@
package paths

import (
`io/fs`
`os`
`path/filepath`
`time`

`github.com/djherbis/times`
`r00t2.io/goutils/bitmask`
)

/*
Match returns match (a ptr to an FsSearchResult if the specified path matches, otherwise nil)
and miss (a ptr to an FsSearchResult if the specified path does not match, otherwise nil) for path.
The path's fs.DirEntry (d) and/or fs.FileInfo (fi) may be passed in if already known;
either or both may be nil, in which case Match gathers them itself.

If err is not nil, it represents an unexpected error and, as such, both match and miss will be nil.

Match, miss, and err will all be nil if the filesystem object/path does not exist.
*/
func (f *FsSearchCriteria) Match(path string, d fs.DirEntry, fi fs.FileInfo) (match, miss *FsSearchResult, err error) {

var typeMode fs.FileMode
var m FsSearchResult
var typeFilter *bitmask.MaskBit

if f == nil {
return
}

typeFilter = bitmask.NewMaskBitExplicit(uint(f.TargetType))

m = FsSearchResult{
Path: path,
DirEntry: d,
FileInfo: fi,
Criteria: f,
}

// A DirEntry can be created from a FileInfo but not vice versa.
if m.FileInfo == nil {
if m.DirEntry != nil {
if m.FileInfo, err = m.DirEntry.Info(); err != nil {
err = filterNoFileDir(err)
if err != nil {
return
}
}
} else {
if f.FollowSymlinks {
if m.FileInfo, err = os.Stat(path); err != nil {
err = filterNoFileDir(err)
if err != nil {
return
}
}
} else {
if m.FileInfo, err = os.Lstat(path); err != nil {
err = filterNoFileDir(err)
if err != nil {
return
}
}
}
m.DirEntry = fs.FileInfoToDirEntry(m.FileInfo)
}
}
if m.DirEntry == nil {
m.DirEntry = fs.FileInfoToDirEntry(m.FileInfo)
}
if m.DirEntry == nil || m.FileInfo == nil {
m.MissReason = MissNoMeta
miss = &m
return
}

if m.Times, err = times.Stat(path); err != nil {
err = filterNoFileDir(err)
if err != nil {
return
}
}

if f.PathPtrn != nil && !f.PathPtrn.MatchString(path) {
m.MissReason = MissBadPath
miss = &m
return
}
if f.BasePtrn != nil && !f.BasePtrn.MatchString(filepath.Base(path)) {
m.MissReason = MissBadBase
miss = &m
return
}

// age
if f.Age != nil {
if f.Now == nil {
f.Now = new(time.Time)
*f.Now = time.Now()
}
if !filterTimes(m.Times, f.Age, &f.AgeType, f.OlderThan, f.Now) {
m.MissReason = MissBadTime
miss = &m
return
}
}

// fs object type (file, dir, etc.)
typeMode = m.FileInfo.Mode().Type()
if typeMode == 0 && f.NoFiles {
m.MissReason = MissFile
miss = &m
return
} else if typeMode != 0 {
if !typeFilter.HasFlag(bitmask.MaskBit(typeMode)) {
m.MissReason = MissType
miss = &m
return
}
}

// If it gets to here, it matches.
match = &m

return
}
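Match can also be called directly for a single path (a sketch; the path and pattern are arbitrary, and d/fi are left nil so Match stats the path itself):

package main

import (
    "fmt"
    "regexp"

    "r00t2.io/sysutils/paths"
)

func main() {
    criteria := &paths.FsSearchCriteria{
        BasePtrn: regexp.MustCompile(`^hostname$`),
    }

    match, miss, err := criteria.Match("/etc/hostname", nil, nil)
    if err != nil {
        panic(err)
    }

    switch {
    case match != nil:
        fmt.Println("matched:", match.Path)
    case miss != nil:
        fmt.Println("missed:", miss.Path, "-", miss.MissReason)
    default:
        fmt.Println("path does not exist")
    }
}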

View File

@ -0,0 +1,13 @@
package paths

import (
`r00t2.io/goutils/bitmask`
)

// Mask returns a bitmask.MaskBit from a pathTimeType.
func (p *pathTimeType) Mask() (mask *bitmask.MaskBit) {

mask = bitmask.NewMaskBitExplicit(uint(*p))

return
}

136
paths/types.go Normal file
View File

@ -0,0 +1,136 @@
package paths

import (
`context`
`io/fs`
`regexp`
`sync`
`time`

`github.com/djherbis/times`
`golang.org/x/sync/semaphore`
`r00t2.io/goutils/bitmask`
)

// FsSearchCriteria contains filter criteria for SearchFsPaths* functions.
type FsSearchCriteria struct {
// Root indicates the root to search.
Root string `json:"root" toml:"RootPath" yaml:"Root Path" xml:"root,attr" validate:"dir"`
// NoMatch, if true, will not return matches. If NoMatch and NoMismatch are both true, no results will be returned.
NoMatch bool `json:"no_match" toml:"NoMatch" yaml:"No Matches" xml:"noMatch,attr"`
// NoMismatch, if true, will not return mismatches. If NoMatch and NoMismatch are both true, no results will be returned.
NoMismatch bool `json:"no_miss" toml:"NoMismatch" yaml:"No Mismatches" xml:"noMiss,attr"`
/*
TargetType defines what types of filesystem objects should be matched.
It can consist of one or more (io/)fs.FileMode types OR'd together
(ensure they are part of (io/)fs.ModeType).
(You can use 0 to match regular files explicitly, and/or NoFiles = true to exclude them.)
*/
TargetType fs.FileMode `json:"type_tgt" toml:"TargetType" yaml:"Target Type" xml:"typeTgt,attr"`
// NoFiles excludes files from TargetType-matching (as there isn't a way to explicitly exclude files otherwise if a non-zero mode is given).
NoFiles bool `json:"no_file" toml:"ExcludeFiles" yaml:"Exclude Files" xml:"noFile,attr"`
// FollowSymlinks, if true and a path being tested is a symlink, will use metadata (age, etc.) of the link target rather than the symlink itself.
FollowSymlinks bool `json:"follow_sym" toml:"FollowSymlinks" yaml:"Follow Symlinks" xml:"followSym,attr"`
// BasePtrn, if specified, will apply to the *base name (that is, quux.txt rather than /foo/bar/baz/quux.txt). See also PathPtrn.
BasePtrn *regexp.Regexp `json:"ptrn_base,omitempty" toml:"BaseNamePattern,omitempty" yaml:"Base Name Pattern,omitempty" xml:"ptrnBase,attr,omitempty"`
// PathPtrn, if specified, will apply to the *full path* (e.g. /foo/bar/baz/quux.txt, not just quux.txt). See also BasePtrn.
PathPtrn *regexp.Regexp `json:"ptrn_path,omitempty" toml:"PathPattern,omitempty" yaml:"Path Pattern,omitempty" xml:"ptrnPath,attr,omitempty"`
/*
Age, if specified, indicates the comparison of Now against the AgeType of filesystem objects.
Use OlderThan to indicate if it should be older or newer.
*/
Age *time.Duration `json:"age,omitempty" toml:"Age,omitempty" yaml:"Age,omitempty" xml:"age,attr,omitempty"`
/*
AgeType can be one (or more, OR'd together) of the Time* constants in this package (TimeAny, TimeAccessed, TimeCreated,
TimeChanged, TimeModified) to indicate what timestamp(s) to use for comparing Age.

The zero-value is TimeAny.

The first matching timestamp will pass all time comparisons.
Be mindful of timestamp type support/limitations per OS/filesystem of Root.

Completely unused if Age is nil.
*/
AgeType pathTimeType `json:"type_age" toml:"AgeType" yaml:"Age Type" xml:"typeAge,attr"`
/*
OlderThan, if true (and Age is not nil), indicates that matching filesystem objects should have their
AgeType older than Now. If false, their AgeType should be *newer* than Now.

Completely unused if Age is nil.
*/
OlderThan bool `json:"older" toml:"OlderThan" yaml:"Older Than" xml:"older,attr"`
/*
Now expresses a time to compare to Age via AgeType and OlderThan.
Note that it may be any valid time, not necessarily "now".
If Age is specified but Now is nil, it will be populated with time.Now() when the search is invoked.

Completely unused if Age is nil.
*/
Now *time.Time `json:"now,omitempty" toml:"Now,omitempty" yaml:"Now,omitempty" xml:"now,attr,omitempty"`
}

// FsSearchCriteriaAsync extends FsSearchCriteria for use in an asynchronous (goroutine) manner.
type FsSearchCriteriaAsync struct {
FsSearchCriteria
/*
WG should be a non-nil pointer to a sync.WaitGroup.
It is used to signal search completion to the caller.

.Done() will be called once within the search function, but no .Add() will be called;
.Add() should be done by the caller beforehand.
*/
WG *sync.WaitGroup
// ResChan must be a non-nil channel for (positive) match results to be sent to.
ResChan chan *FsSearchResult
// MismatchChan, if not nil, will have negative matches/"misses" sent to it.
MismatchChan chan *FsSearchResult
/*
ErrChan should be a non-nil error channel for any unexpected errors encountered.

If nil, a panic will be raised.
*/
ErrChan chan error
/*
Semaphore is completely optional, but if non-nil
it will be used to limit concurrent filesystem
object processing.

It is generally a Very Good Idea(TM) to use this,
as the default is to dispatch all processing concurrently.
This can lead to some heavy I/O and CPU wait.

(See https://pkg.go.dev/golang.org/x/sync/semaphore for details.)
*/
Semaphore *semaphore.Weighted
/*
SemaphoreCtx is the context.Context to use for Semaphore.
If nil (but Semaphore is not), one will be created locally/internally.
*/
SemaphoreCtx context.Context
}

// FsSearchResult contains a match/miss result for FsSearchCriteria and FsSearchCriteriaAsync.
type FsSearchResult struct {
/*
Path is the path to the object on the filesystem.
It may or may not exist at the time of return,
but will not be an empty string.
*/
Path string `json:"path" toml:"Path" yaml:"Path" xml:"path,attr"`
// DirEntry is the fs.DirEntry for the Path; note that .Name() is the base name only. TODO: serialization?
DirEntry fs.DirEntry `json:"-" toml:"-" yaml:"-" xml:"-"`
// FileInfo is the fs.FileInfo for the Path; note that .Name() is the base name only. TODO: serialization?
FileInfo fs.FileInfo `json:"-" toml:"-" yaml:"-" xml:"-"`
// Criteria is the evaluated criteria specified that this FsSearchResult matched.
Criteria *FsSearchCriteria `json:"criteria" toml:"Criteria" yaml:"Criteria" xml:"criteria"`
// Times holds the mtime, ctime, etc. of the filesystem object (where supported). TODO: serialization?
Times times.Timespec `json:"-" toml:"-" yaml:"-" xml:"-"`
// MissReason contains the reason the result is a miss (MissNoMiss if a match); see the Miss* constants.
MissReason missReason `json:"miss_reason" toml:"MissReason" yaml:"Miss Reason" xml:"miss,attr"`
}

type missReason string

type pathMode bitmask.MaskBit

type pathTimeType bitmask.MaskBit