Compare commits
22 Commits
Author | SHA1 | Date | |
---|---|---|---|
![]() |
d7db23d58c | ||
![]() |
5a62622892 | ||
![]() |
e797a14911 | ||
![]() |
8260e4fa93 | ||
![]() |
e5f7296d2e | ||
![]() |
82f58d4fbf | ||
![]() |
772324247a | ||
![]() |
7b0156775c | ||
![]() |
c6efc2d83c | ||
![]() |
eefe02afaf | ||
![]() |
b82f0c02ed | ||
![]() |
903dd00c81 | ||
![]() |
70a88ca8b4 | ||
![]() |
9dbc3a00fe | ||
![]() |
e9b7c5539a | ||
![]() |
236165bec8 | ||
![]() |
4cb0403e08 | ||
![]() |
0318a9759b | ||
![]() |
1a93d5d9f3 | ||
![]() |
5dc944cf21 | ||
![]() |
77a85a4f84 | ||
![]() |
43d1ddfeb8 |
3
.gitignore
vendored
3
.gitignore
vendored
@ -29,6 +29,9 @@
|
|||||||
# Test binary, built with `go test -c`
|
# Test binary, built with `go test -c`
|
||||||
*.test
|
*.test
|
||||||
|
|
||||||
|
# Test file
|
||||||
|
fsutils/testfile
|
||||||
|
|
||||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||||
*.out
|
*.out
|
||||||
|
|
||||||
|
7
TODO
7
TODO
@ -1,8 +1,9 @@
|
|||||||
|
- refactor the elevation detection stuff. I'm not terribly happy with it.
|
||||||
|
|
||||||
- password generator utility/library
|
- password generator utility/library
|
||||||
|
-- incorporate with r00t2.io/pwgen
|
||||||
-- incorporate with https://github.com/tredoe/osutil ?
|
-- incorporate with https://github.com/tredoe/osutil ?
|
||||||
-- cli flag to dump flat hashes too
|
-- cli flag to dump flat hashes too (https://github.com/hlandau/passlib and others soon in pwgen)
|
||||||
--- https://github.com/hlandau/passlib
|
|
||||||
-- incoprporated separately; https://git.r00t2.io/r00t2/PWGen (import r00t2.io/pwgen)
|
|
||||||
|
|
||||||
- auger needs to be build-constrained to linux.
|
- auger needs to be build-constrained to linux.
|
||||||
|
|
||||||
|
1
auger/TODO
Normal file
1
auger/TODO
Normal file
@ -0,0 +1 @@
|
|||||||
|
This module is still under work.
|
@ -7,3 +7,35 @@ const (
|
|||||||
augInclTfm string = "incl" // The transformer keyword for Augeas includes.
|
augInclTfm string = "incl" // The transformer keyword for Augeas includes.
|
||||||
augAppendSuffix string = "[last()+1]"
|
augAppendSuffix string = "[last()+1]"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
dstPtrTrue bool = true
|
||||||
|
dstPtrFalse bool = false
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// PtrTrue and PtrFalse are convenience references for constructing an AugFlags if needed. It is recommended you do not change these values if you do not like being confused.
|
||||||
|
PtrTrue *bool = &dstPtrTrue
|
||||||
|
PtrFalse *bool = &dstPtrFalse
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
IncludeOptNone is the default include recursion option for Aug.RecursiveInclude.
|
||||||
|
* No special behavior is defined
|
||||||
|
* All include directives are assumed to refer:
|
||||||
|
* Explicitly/exclusively to file paths
|
||||||
|
* That must exist
|
||||||
|
*/
|
||||||
|
const IncludeOptNone includeOpt = 0
|
||||||
|
const (
|
||||||
|
// IncludeOptNoExist specifies that inclusions are allowed to not exist, otherwise an error will be raised while attempting to parse them.
|
||||||
|
IncludeOptNoExist includeOpt = 1 << iota
|
||||||
|
// IncludeOptGlobbing indicates that the inclusion system supports globbing (as supported by (github.com/gobwas/glob).Match).
|
||||||
|
IncludeOptGlobbing
|
||||||
|
// IncludeOptRegex indicates that the inclusion system supports matching by regex (as supported by regexp).
|
||||||
|
IncludeOptRegex
|
||||||
|
// IncludeOptDirs indicates that the inclusion system supports matching by directory.
|
||||||
|
IncludeOptDirs
|
||||||
|
// IncludeOptDirsRecursive indicates that the inclusion system also recurses into subdirectories of matched directories. Only used if IncludeOptDirs is also set.
|
||||||
|
IncludeOptDirsRecursive
|
||||||
|
)
|
||||||
|
@ -4,15 +4,55 @@ import (
|
|||||||
`io/fs`
|
`io/fs`
|
||||||
`os`
|
`os`
|
||||||
`strings`
|
`strings`
|
||||||
|
|
||||||
|
`honnef.co/go/augeas`
|
||||||
|
`r00t2.io/goutils/bitmask`
|
||||||
)
|
)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
AugpathToFspath returns the filesystem path from an Augeas path.
|
NewAuger returns an auger.Aug.
|
||||||
|
|
||||||
|
See:
|
||||||
|
https://pkg.go.dev/honnef.co/go/augeas#readme-examples
|
||||||
|
https://pkg.go.dev/honnef.co/go/augeas#New
|
||||||
|
for the `root` and `loadPath` parameters
|
||||||
|
(and, by extension, the `flags` paraemter; note that the `flags`
|
||||||
|
is an auger.AugFlags, not an augeas.Flag!).
|
||||||
|
|
||||||
|
`flags` may be nil.
|
||||||
|
*/
|
||||||
|
func NewAuger(root, loadPath string, flags *AugFlags) (aug *Aug, err error) {
|
||||||
|
|
||||||
|
aug = new(Aug)
|
||||||
|
|
||||||
|
if aug.aug, err = augeas.New(root, loadPath, flags.Eval()); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAugerFromAugeas returns a wrapped auger.Aug from a (honnef.co/go/augeas).Augeas.
|
||||||
|
func NewAugerFromAugeas(orig augeas.Augeas) (aug *Aug) {
|
||||||
|
|
||||||
|
aug = new(Aug)
|
||||||
|
aug.aug = orig
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
AugpathToFspath returns the filesystem path (i.e. an existing file) from an Augeas path.
|
||||||
|
|
||||||
It is *required* and expected that the Augeas standard /files prefix be removed first;
|
It is *required* and expected that the Augeas standard /files prefix be removed first;
|
||||||
if not, it is assumed to be part of the filesystem path.
|
if not, it is assumed to be part of the filesystem path.
|
||||||
|
|
||||||
If a valid path cannot be determined, fsPath will be empty.
|
If a valid path cannot be determined, fsPath will be empty.
|
||||||
|
|
||||||
|
To be clear, a file must exist for fsPath to not be empty;
|
||||||
|
the way AugpathToFsPath works is it recurses bottom-up a
|
||||||
|
given path and checks for the existence of a file,
|
||||||
|
continuing upwards if not found.
|
||||||
*/
|
*/
|
||||||
func AugpathToFspath(augPath string) (fsPath string, err error) {
|
func AugpathToFspath(augPath string) (fsPath string, err error) {
|
||||||
|
|
||||||
@ -61,3 +101,11 @@ func dedupePaths(new, existing []string) (missing []string) {
|
|||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getInclPaths applies path options to inclusions.
|
||||||
|
func getInclPaths(pathSpec string, inclFlags *bitmask.MaskBit) (fpaths []string, err error) {
|
||||||
|
|
||||||
|
// TODO
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
@ -12,6 +12,7 @@ import (
|
|||||||
`github.com/davecgh/go-spew/spew`
|
`github.com/davecgh/go-spew/spew`
|
||||||
`github.com/google/shlex`
|
`github.com/google/shlex`
|
||||||
`honnef.co/go/augeas`
|
`honnef.co/go/augeas`
|
||||||
|
`r00t2.io/goutils/bitmask`
|
||||||
`r00t2.io/sysutils/paths`
|
`r00t2.io/sysutils/paths`
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -146,10 +147,21 @@ breakCmd:
|
|||||||
An error will be returned if augLens is a nonexistent or not-loaded Augeas lens module.
|
An error will be returned if augLens is a nonexistent or not-loaded Augeas lens module.
|
||||||
|
|
||||||
Depending on how many files there are and whether globs vs. explicit filepaths are included, this may take a while.
|
Depending on how many files there are and whether globs vs. explicit filepaths are included, this may take a while.
|
||||||
*/
|
|
||||||
func (a *Aug) RecursiveInclude(augLens, includeDirective, fsRoot string) (err error) {
|
|
||||||
|
|
||||||
if err = a.addIncl(includeDirective, augLens, fsRoot, nil); err != nil {
|
optFlags may be nil, multiple includeOpt (see the IncludeOpt* constants) as variadic parameters/expanded slice,
|
||||||
|
bitwise-OR'd together, or multiple non-OR'd and OR'd together (all will be combined to a single value).
|
||||||
|
*/
|
||||||
|
func (a *Aug) RecursiveInclude(augLens, includeDirective, fsRoot string, optFlags ...includeOpt) (err error) {
|
||||||
|
|
||||||
|
var flags *bitmask.MaskBit = bitmask.NewMaskBit()
|
||||||
|
|
||||||
|
if optFlags != nil && len(optFlags) > 0 {
|
||||||
|
for _, f := range optFlags {
|
||||||
|
flags.AddFlag(f.toMb())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = a.addIncl(includeDirective, augLens, fsRoot, nil, flags); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -164,14 +176,16 @@ func (a *Aug) RecursiveInclude(augLens, includeDirective, fsRoot string) (err er
|
|||||||
newInclPaths are new filesystem paths/Augeas-compatible glob patterns to load into the filetree and recurse into.
|
newInclPaths are new filesystem paths/Augeas-compatible glob patterns to load into the filetree and recurse into.
|
||||||
They may be nil, especially if the first run.
|
They may be nil, especially if the first run.
|
||||||
*/
|
*/
|
||||||
func (a *Aug) addIncl(includeDirective, augLens string, fsRoot string, newInclPaths []string) (err error) {
|
func (a *Aug) addIncl(includeDirective, augLens string, fsRoot string, newInclPaths []string, inclFlags *bitmask.MaskBit) (err error) {
|
||||||
|
|
||||||
var matches []string // Passed around set of Augeas matches.
|
var matches []string // Passed around set of Augeas matches.
|
||||||
|
var exists bool // Used to indicate if the include path exists.
|
||||||
var includes []string // Filepath(s)/glob(s) from fetching includeDirective in lensInclPath. These are internal to the application but are recursed.
|
var includes []string // Filepath(s)/glob(s) from fetching includeDirective in lensInclPath. These are internal to the application but are recursed.
|
||||||
var lensInclPath string // The path of the included paths in the tree. These are internal to Augeas, not the application.
|
var lensInclPath string // The path of the included paths in the tree. These are internal to Augeas, not the application.
|
||||||
var appendPath string // The path for new Augeas includes.
|
var appendPath string // The path for new Augeas includes.
|
||||||
var match []string // A placeholder for iterating when populating includes.
|
var match []string // A placeholder for iterating when populating includes.
|
||||||
var fpath string // A placeholder for finding the path of a conf file that contains an includeDirective.
|
var fpath string // A placeholder for finding the path of a conf file that contains an includeDirective.
|
||||||
|
var normalizedIncludes []string // A temporary slice to hold normalization operations and other dynamic building.
|
||||||
var lensPath string = fmt.Sprintf(augLensTpl, augLens) // The path of the lens (augLens) itself.
|
var lensPath string = fmt.Sprintf(augLensTpl, augLens) // The path of the lens (augLens) itself.
|
||||||
var augErr *augeas.Error = new(augeas.Error) // We use this to skip "nonexistent" lens.
|
var augErr *augeas.Error = new(augeas.Error) // We use this to skip "nonexistent" lens.
|
||||||
|
|
||||||
@ -193,7 +207,7 @@ func (a *Aug) addIncl(includeDirective, augLens string, fsRoot string, newInclPa
|
|||||||
|
|
||||||
// First canonize paths.
|
// First canonize paths.
|
||||||
if newInclPaths != nil && len(newInclPaths) > 0 {
|
if newInclPaths != nil && len(newInclPaths) > 0 {
|
||||||
// Existing includes. We don't return on an empty lensInclPath because
|
// Existing includes. We don't return on an empty lensInclPath.
|
||||||
if matches, err = a.aug.Match(lensInclPath); err != nil {
|
if matches, err = a.aug.Match(lensInclPath); err != nil {
|
||||||
if errors.As(err, augErr) && augErr.Code == augeas.NoMatch {
|
if errors.As(err, augErr) && augErr.Code == augeas.NoMatch {
|
||||||
err = nil
|
err = nil
|
||||||
@ -221,6 +235,17 @@ func (a *Aug) addIncl(includeDirective, augLens string, fsRoot string, newInclPa
|
|||||||
// We don't want to bother adding multiple incl's for the same path(s); it can negatively affect Augeas loads.
|
// We don't want to bother adding multiple incl's for the same path(s); it can negatively affect Augeas loads.
|
||||||
newInclPaths = dedupePaths(newInclPaths, matches)
|
newInclPaths = dedupePaths(newInclPaths, matches)
|
||||||
|
|
||||||
|
// And then apply things like recursion, globbing, etc.
|
||||||
|
normalizedIncludes = make([]string, 0, len(newInclPaths))
|
||||||
|
if inclFlags.HasFlag(IncludeOptGlobbing.toMb()) {
|
||||||
|
// TODO
|
||||||
|
/*
|
||||||
|
if strings.Contains(newInclPaths[idx], "*") {
|
||||||
|
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
}
|
||||||
|
|
||||||
// Add the new path(s) as Augeas include entries.
|
// Add the new path(s) as Augeas include entries.
|
||||||
if newInclPaths != nil {
|
if newInclPaths != nil {
|
||||||
for _, fsPath := range newInclPaths {
|
for _, fsPath := range newInclPaths {
|
||||||
@ -285,10 +310,13 @@ func (a *Aug) addIncl(includeDirective, augLens string, fsRoot string, newInclPa
|
|||||||
}
|
}
|
||||||
|
|
||||||
if matches != nil && len(matches) != 0 {
|
if matches != nil && len(matches) != 0 {
|
||||||
if err = a.addIncl(includeDirective, augLens, fsRoot, matches); err != nil {
|
if err = a.addIncl(includeDirective, augLens, fsRoot, matches, inclFlags); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO
|
||||||
|
_, _ = exists, normalizedIncludes
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -7,6 +7,10 @@ import (
|
|||||||
// Eval returns an evaluated set of flags.
|
// Eval returns an evaluated set of flags.
|
||||||
func (a *AugFlags) Eval() (augFlags augeas.Flag) {
|
func (a *AugFlags) Eval() (augFlags augeas.Flag) {
|
||||||
|
|
||||||
|
if a == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
augFlags = augeas.None
|
augFlags = augeas.None
|
||||||
|
|
||||||
if a.Backup != nil && *a.Backup {
|
if a.Backup != nil && *a.Backup {
|
||||||
|
13
auger/funcs_includeopt.go
Normal file
13
auger/funcs_includeopt.go
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
package auger
|
||||||
|
|
||||||
|
import (
|
||||||
|
`r00t2.io/goutils/bitmask`
|
||||||
|
)
|
||||||
|
|
||||||
|
// toMb returns a bitmask.MaskBit of this includeOpt.
|
||||||
|
func (i includeOpt) toMb() (mb bitmask.MaskBit) {
|
||||||
|
|
||||||
|
mb = bitmask.MaskBit(i)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
39
auger/funcs_test.go
Normal file
39
auger/funcs_test.go
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
package auger
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
`honnef.co/go/augeas`
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewAuger(t *testing.T) {
|
||||||
|
|
||||||
|
var aug *Aug
|
||||||
|
var augUnder augeas.Augeas
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if aug, err = NewAuger("/", "", nil); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
augUnder = aug.aug
|
||||||
|
|
||||||
|
aug = NewAugerFromAugeas(augUnder)
|
||||||
|
|
||||||
|
_ = aug
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRecursiveInclude(t *testing.T) {
|
||||||
|
|
||||||
|
var aug *Aug
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if aug, err = NewAuger("/", "", &AugFlags{DryRun: PtrTrue}); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// This requires Nginx to be installed and with a particularly complex nested include system.
|
||||||
|
if err = aug.RecursiveInclude("Nginx", "include", "/etc/nginx"); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
@ -2,8 +2,11 @@ package auger
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
`honnef.co/go/augeas`
|
`honnef.co/go/augeas`
|
||||||
|
`r00t2.io/goutils/bitmask`
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type includeOpt bitmask.MaskBit
|
||||||
|
|
||||||
// Aug is a wrapper around (honnef.co/go/)augeas.Augeas. Remember to call Aug.Close().
|
// Aug is a wrapper around (honnef.co/go/)augeas.Augeas. Remember to call Aug.Close().
|
||||||
type Aug struct {
|
type Aug struct {
|
||||||
aug augeas.Augeas
|
aug augeas.Augeas
|
||||||
|
@ -1,3 +0,0 @@
|
|||||||
- PKCS#12/PFX parsing/support
|
|
||||||
|
|
||||||
- Move to struct tags and reflection, so it can not only be easier to maintain in the future but also be implemented in custom structs downstream.
|
|
@ -1,123 +0,0 @@
|
|||||||
package cryptparse
|
|
||||||
|
|
||||||
import (
|
|
||||||
`crypto/tls`
|
|
||||||
|
|
||||||
`github.com/go-playground/validator/v10`
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
tlsVerNmToUint map[string]uint16
|
|
||||||
tlsCipherNmToUint map[string]uint16
|
|
||||||
tlsCurveNmToCurve map[string]tls.CurveID
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
MaxTlsCipher uint16 = tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
|
|
||||||
MaxCurveId tls.CurveID = tls.X25519 // 29
|
|
||||||
MinTlsVer uint16 = tls.VersionSSL30
|
|
||||||
MaxTlsVer uint16 = tls.VersionTLS13
|
|
||||||
)
|
|
||||||
|
|
||||||
// TlsUriParam* specifiy URL query parameters to parse a tls:// URI.
|
|
||||||
const (
|
|
||||||
/*
|
|
||||||
TlsUriParamCa specifies a path to a CA certificate PEM-encded DER file.
|
|
||||||
|
|
||||||
It may be specified multiple times in a TLS URI.
|
|
||||||
*/
|
|
||||||
TlsUriParamCa string = "pki_ca"
|
|
||||||
/*
|
|
||||||
TlsUriParamCert specifies a path to a client certificate PEM-encded DER file.
|
|
||||||
|
|
||||||
It may be specified multiple times in a TLS URI.
|
|
||||||
*/
|
|
||||||
TlsUriParamCert string = "pki_cert"
|
|
||||||
/*
|
|
||||||
TlsUriParamKey specifies a path to a private key as a PEM-encded file.
|
|
||||||
|
|
||||||
It may be PKCS#1, PKCS#8, or PEM-encoded ASN.1 DER EC key.
|
|
||||||
|
|
||||||
Supported private key types are RSA, ED25519, ECDSA, and ECDH.
|
|
||||||
|
|
||||||
It may be specified multiple times in a TLS URI.
|
|
||||||
*/
|
|
||||||
TlsUriParamKey string = "pki_key"
|
|
||||||
/*
|
|
||||||
TlsUriParamNoVerify, if `1`, `yes`, `y`, or `true` indicate
|
|
||||||
that the TLS connection should not require verification of
|
|
||||||
the remote end (e.g. hostname matches, trusted chain, etc.).
|
|
||||||
|
|
||||||
Any other value for this parameter will be parsed as "False"
|
|
||||||
(meaning the remote end's certificate SHOULD be verified).
|
|
||||||
|
|
||||||
Only the first defined instance is parsed.
|
|
||||||
*/
|
|
||||||
TlsUriParamNoVerify string = "no_verify"
|
|
||||||
/*
|
|
||||||
TlsUriParamSni indicates that the TLS connection should expect this hostname
|
|
||||||
instead of the hostname specified in the URI itself.
|
|
||||||
|
|
||||||
Only the first defined instance is parsed.
|
|
||||||
*/
|
|
||||||
TlsUriParamSni string = "sni"
|
|
||||||
/*
|
|
||||||
TlsUriParamCipher specifies one (or more) cipher(s)
|
|
||||||
to specify for the TLS connection cipher negotiation.
|
|
||||||
Note that TLS 1.3 has a fixed set of ciphers, and
|
|
||||||
this list may not be respected by the remote end.
|
|
||||||
|
|
||||||
The string may either be the name (as per
|
|
||||||
https://www.iana.org/assignments/tls-parameters/tls-parameters.xml)
|
|
||||||
or an int (normal, hex, etc. string representation).
|
|
||||||
|
|
||||||
It may be specified multiple times in a TLS URI.
|
|
||||||
*/
|
|
||||||
TlsUriParamCipher string = "cipher"
|
|
||||||
/*
|
|
||||||
TlsUriParamCurve specifies one (or more) curve(s)
|
|
||||||
to specify for the TLS connection cipher negotiation.
|
|
||||||
|
|
||||||
It may be specified multiple times in a TLS URI.
|
|
||||||
*/
|
|
||||||
TlsUriParamCurve string = "curve"
|
|
||||||
/*
|
|
||||||
TlsUriParamMinTls defines the minimum version of the
|
|
||||||
TLS protocol to use.
|
|
||||||
It is recommended to use "TLS_1.3".
|
|
||||||
|
|
||||||
Supported syntax formats include:
|
|
||||||
|
|
||||||
* TLS_1.3
|
|
||||||
* 1.3
|
|
||||||
* v1.3
|
|
||||||
* TLSv1.3
|
|
||||||
* 0x0304 (legacy_version, see RFC8446 § 4.1.2)
|
|
||||||
* 774 (0x0304 in int form)
|
|
||||||
* 0o1404 (0x0304 in octal form)
|
|
||||||
|
|
||||||
All evaluate to TLS 1.3 in this example.
|
|
||||||
|
|
||||||
Only the first defined instance is parsed.
|
|
||||||
*/
|
|
||||||
TlsUriParamMinTls string = "min_tls"
|
|
||||||
/*
|
|
||||||
TlsUriParamMaxTls defines the minimum version of the
|
|
||||||
TLS protocol to use.
|
|
||||||
|
|
||||||
See TlsUriParamMinTls for syntax of the value.
|
|
||||||
|
|
||||||
Only the first defined instance is parsed.
|
|
||||||
*/
|
|
||||||
TlsUriParamMaxTls string = "max_tls"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
paramBoolValsTrue []string = []string{
|
|
||||||
"1", "yes", "y", "true",
|
|
||||||
}
|
|
||||||
paramBoolValsFalse []string = []string{
|
|
||||||
"0", "no", "n", "false",
|
|
||||||
}
|
|
||||||
validate *validator.Validate = validator.New(validator.WithRequiredStructEnabled())
|
|
||||||
)
|
|
6
cryptparse/doc.go
Normal file
6
cryptparse/doc.go
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
/*
|
||||||
|
CRYPTPARSE HAS MOVED.
|
||||||
|
|
||||||
|
It is now its own module: r00t2.io/cryptparse
|
||||||
|
*/
|
||||||
|
package cryptparse
|
@ -1,12 +0,0 @@
|
|||||||
package cryptparse
|
|
||||||
|
|
||||||
import (
|
|
||||||
`errors`
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
ErrBadTlsCipher error = errors.New("invalid TLS cipher suite")
|
|
||||||
ErrBadTlsCurve error = errors.New("invalid TLS curve")
|
|
||||||
ErrBadTlsVer error = errors.New("invalid TLS version")
|
|
||||||
ErrUnknownKey error = errors.New("unknown key type")
|
|
||||||
)
|
|
@ -1,751 +0,0 @@
|
|||||||
package cryptparse
|
|
||||||
|
|
||||||
import (
|
|
||||||
`bytes`
|
|
||||||
`crypto`
|
|
||||||
`crypto/ecdh`
|
|
||||||
`crypto/ecdsa`
|
|
||||||
`crypto/ed25519`
|
|
||||||
`crypto/rsa`
|
|
||||||
`crypto/tls`
|
|
||||||
`crypto/x509`
|
|
||||||
`encoding/pem`
|
|
||||||
`errors`
|
|
||||||
`net/url`
|
|
||||||
`os`
|
|
||||||
`strconv`
|
|
||||||
`strings`
|
|
||||||
|
|
||||||
`r00t2.io/sysutils/paths`
|
|
||||||
)
|
|
||||||
|
|
||||||
// FromURL returns a *TlsUri from a *url.URL.
|
|
||||||
func FromURL(u *url.URL) (t *TlsUri) {
|
|
||||||
|
|
||||||
var newU *url.URL
|
|
||||||
|
|
||||||
if u == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
newU = new(url.URL)
|
|
||||||
*newU = *u
|
|
||||||
if u.User != nil {
|
|
||||||
newU.User = new(url.Userinfo)
|
|
||||||
*newU.User = *u.User
|
|
||||||
}
|
|
||||||
|
|
||||||
newU.Scheme = "tls"
|
|
||||||
|
|
||||||
t = &TlsUri{
|
|
||||||
URL: newU,
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsMatchedPair returns true if the privateKey is paired with the cert.
|
|
||||||
func IsMatchedPair(privKey crypto.PrivateKey, cert *x509.Certificate) (isMatched bool, err error) {
|
|
||||||
|
|
||||||
var pubkey crypto.PublicKey
|
|
||||||
|
|
||||||
if cert == nil || privKey == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
pubkey = cert.PublicKey
|
|
||||||
|
|
||||||
switch k := privKey.(type) {
|
|
||||||
case *rsa.PrivateKey:
|
|
||||||
if p, ok := pubkey.(*rsa.PublicKey); ok {
|
|
||||||
isMatched = k.PublicKey.Equal(p)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case ed25519.PrivateKey:
|
|
||||||
if p, ok := pubkey.(ed25519.PublicKey); ok {
|
|
||||||
// Order is flipped here because unlike the other key types, an ed25519.PrivateKey is just a []byte.
|
|
||||||
isMatched = p.Equal(k.Public())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case *ecdh.PrivateKey:
|
|
||||||
if p, ok := pubkey.(*ecdh.PublicKey); ok {
|
|
||||||
isMatched = k.PublicKey().Equal(p)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case *ecdsa.PrivateKey:
|
|
||||||
if p, ok := pubkey.(*ecdsa.PublicKey); ok {
|
|
||||||
isMatched = k.PublicKey.Equal(p)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we got here, we can't determine either the private key type or the cert's public key type.
|
|
||||||
err = ErrUnknownKey
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
ParseTlsCipher parses string s and attempts to derive a TLS cipher suite (as a uint16) from it.
|
|
||||||
Use ParseTlsCipherSuite if you wish for a tls.CipherSuite instead.
|
|
||||||
|
|
||||||
The string may either be the name (as per https://www.iana.org/assignments/tls-parameters/tls-parameters.xml)
|
|
||||||
or an int (normal, hex, etc. string representation).
|
|
||||||
|
|
||||||
If none is found, the default is MaxTlsCipher.
|
|
||||||
*/
|
|
||||||
func ParseTlsCipher(s string) (cipherSuite uint16, err error) {
|
|
||||||
|
|
||||||
var nm string
|
|
||||||
var n uint64
|
|
||||||
var i uint16
|
|
||||||
var ok bool
|
|
||||||
|
|
||||||
if n, err = strconv.ParseUint(s, 10, 16); err != nil {
|
|
||||||
if errors.Is(err, strconv.ErrSyntax) {
|
|
||||||
// It's a name; parse below.
|
|
||||||
err = nil
|
|
||||||
} else {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// It's a number.
|
|
||||||
if nm = tls.CipherSuiteName(uint16(n)); strings.HasPrefix(nm, "0x") {
|
|
||||||
// ...but invalid.
|
|
||||||
err = ErrBadTlsCipher
|
|
||||||
return
|
|
||||||
} else {
|
|
||||||
// Valid (as number). Return it.
|
|
||||||
cipherSuite = uint16(n)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
s = strings.ToUpper(s)
|
|
||||||
s = strings.ReplaceAll(s, " ", "_")
|
|
||||||
|
|
||||||
// We build a dynamic map of cipher suite names to uint16s (if not already created).
|
|
||||||
if tlsCipherNmToUint == nil {
|
|
||||||
tlsCipherNmToUint = make(map[string]uint16)
|
|
||||||
for i = 0; i <= MaxTlsCipher; i++ {
|
|
||||||
if nm = tls.VersionName(i); !strings.HasPrefix(nm, "0x") {
|
|
||||||
tlsCipherNmToUint[nm] = i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
cipherSuite = MaxTlsCipher
|
|
||||||
if i, ok = tlsCipherNmToUint[s]; ok {
|
|
||||||
cipherSuite = i
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
ParseTlsCiphers parses s as a comma-separated list of cipher suite names/integers and returns a slice of suites.
|
|
||||||
|
|
||||||
See ParseTlsCipher for details, as this is mostly just a wrapper around it.
|
|
||||||
|
|
||||||
If no cipher suites are found, cipherSuites will only contain MaxTlsCipher.
|
|
||||||
*/
|
|
||||||
func ParseTlsCiphers(s string) (cipherSuites []uint16) {
|
|
||||||
|
|
||||||
var suiteNms []string
|
|
||||||
var cipher uint16
|
|
||||||
var err error
|
|
||||||
|
|
||||||
suiteNms = strings.Split(s, ",")
|
|
||||||
cipherSuites = make([]uint16, 0, len(suiteNms))
|
|
||||||
|
|
||||||
for _, nm := range suiteNms {
|
|
||||||
if cipher, err = ParseTlsCipher(nm); err != nil {
|
|
||||||
err = nil
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
cipherSuites = append(cipherSuites, cipher)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(cipherSuites) == 0 {
|
|
||||||
cipherSuites = []uint16{tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseTlsCipherSuite is like ParseTlsCipher but returns a *tls.CipherSuite instead of a uint16 TLS cipher identifier.
|
|
||||||
func ParseTlsCipherSuite(s string) (cipherSuite *tls.CipherSuite, err error) {
|
|
||||||
|
|
||||||
var cipherId uint16
|
|
||||||
|
|
||||||
if cipherId, err = ParseTlsCipher(s); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, v := range tls.CipherSuites() {
|
|
||||||
if v.ID == cipherId {
|
|
||||||
cipherSuite = v
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, v := range tls.InsecureCipherSuites() {
|
|
||||||
if v.ID == cipherId {
|
|
||||||
cipherSuite = v
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseTlsCipherSuites is like ParseTlsCiphers but returns a []*tls.CipherSuite instead of a []uint16 of TLS cipher identifiers.
|
|
||||||
func ParseTlsCipherSuites(s string) (cipherSuites []*tls.CipherSuite, err error) {
|
|
||||||
|
|
||||||
var found bool
|
|
||||||
var cipherIds []uint16
|
|
||||||
|
|
||||||
cipherIds = ParseTlsCiphers(s)
|
|
||||||
|
|
||||||
for _, cipherId := range cipherIds {
|
|
||||||
found = false
|
|
||||||
for _, v := range tls.CipherSuites() {
|
|
||||||
if v.ID == cipherId {
|
|
||||||
cipherSuites = append(cipherSuites, v)
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
for _, v := range tls.InsecureCipherSuites() {
|
|
||||||
if v.ID == cipherId {
|
|
||||||
cipherSuites = append(cipherSuites, v)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
ParseTlsCurve parses string s and attempts to derive a tls.CurveID from it.
|
|
||||||
|
|
||||||
The string may either be the name (as per // https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-8)
|
|
||||||
or an int (normal, hex, etc. string representation).
|
|
||||||
*/
|
|
||||||
func ParseTlsCurve(s string) (curve tls.CurveID, err error) {
|
|
||||||
|
|
||||||
var i tls.CurveID
|
|
||||||
var n uint64
|
|
||||||
var ok bool
|
|
||||||
|
|
||||||
if n, err = strconv.ParseUint(s, 10, 16); err != nil {
|
|
||||||
if errors.Is(err, strconv.ErrSyntax) {
|
|
||||||
// It's a name; parse below.
|
|
||||||
err = nil
|
|
||||||
} else {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// It's a number.
|
|
||||||
if strings.HasPrefix(tls.CurveID(uint16(n)).String(), "CurveID(") {
|
|
||||||
// ...but invalid.
|
|
||||||
err = ErrBadTlsCurve
|
|
||||||
return
|
|
||||||
} else {
|
|
||||||
// Valid (as number). Return it.
|
|
||||||
curve = tls.CurveID(uint16(n))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// It seems to be a name. Normalize...
|
|
||||||
s = strings.ToUpper(s)
|
|
||||||
|
|
||||||
// Unfortunately there's no "tls.CurveIDName()" function.
|
|
||||||
// They do have a .String() method though.
|
|
||||||
if tlsCurveNmToCurve == nil {
|
|
||||||
tlsCurveNmToCurve = make(map[string]tls.CurveID)
|
|
||||||
for i = 0; i <= MaxCurveId; i++ {
|
|
||||||
if strings.HasPrefix(i.String(), "CurveID(") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
tlsCurveNmToCurve[i.String()] = i
|
|
||||||
// It's normally mixed-case; we want to be able to look it up in a normalized all-caps as well.
|
|
||||||
tlsCurveNmToCurve[strings.ToUpper(i.String())] = i
|
|
||||||
// The normal name, except for X25519, has "Curve" in the front. We add it without that prefix as well.
|
|
||||||
tlsCurveNmToCurve[strings.TrimPrefix(i.String(), "Curve")] = i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
curve = MaxCurveId
|
|
||||||
if _, ok = tlsCurveNmToCurve[s]; ok {
|
|
||||||
curve = tlsCurveNmToCurve[s]
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
ParseTlsCurves parses s as a comma-separated list of tls.CurveID names/integers and returns a slice of tls.CurveID.
|
|
||||||
|
|
||||||
See ParseTlsCurve for details, as this is mostly just a wrapper around it.
|
|
||||||
|
|
||||||
If no curves are found, curves will only contain MaxCurveId.
|
|
||||||
*/
|
|
||||||
func ParseTlsCurves(s string) (curves []tls.CurveID) {
|
|
||||||
|
|
||||||
var curveNms []string
|
|
||||||
var curve tls.CurveID
|
|
||||||
var err error
|
|
||||||
|
|
||||||
curveNms = strings.Split(s, ",")
|
|
||||||
curves = make([]tls.CurveID, 0, len(curveNms))
|
|
||||||
|
|
||||||
for _, nm := range curveNms {
|
|
||||||
if curve, err = ParseTlsCurve(nm); err != nil {
|
|
||||||
err = nil
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
curves = append(curves, curve)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(curves) == 0 {
|
|
||||||
curves = []tls.CurveID{MaxCurveId}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
ParseTlsUri parses a "TLS URI"'s query parameters. All certs and keys must be in PEM format.
|
|
||||||
|
|
||||||
You probably don't need this and should instead be using TlsUri.ToTlsConfig.
|
|
||||||
It just wraps this, but is probably more convenient.
|
|
||||||
*/
|
|
||||||
func ParseTlsUri(tlsUri *url.URL) (tlsConf *tls.Config, err error) {
|
|
||||||
|
|
||||||
var b []byte
|
|
||||||
var rootCAs *x509.CertPool
|
|
||||||
var intermediateCAs []*x509.Certificate
|
|
||||||
var privKeys []crypto.PrivateKey
|
|
||||||
var tlsCerts []tls.Certificate
|
|
||||||
var allowInvalid bool
|
|
||||||
var ciphers []uint16
|
|
||||||
var curves []tls.CurveID
|
|
||||||
var params map[string][]string
|
|
||||||
var ok bool
|
|
||||||
var val string
|
|
||||||
var minVer uint16
|
|
||||||
var maxVer uint16
|
|
||||||
var buf *bytes.Buffer = new(bytes.Buffer)
|
|
||||||
var srvNm string = tlsUri.Hostname()
|
|
||||||
|
|
||||||
params = tlsUri.Query()
|
|
||||||
|
|
||||||
if params == nil {
|
|
||||||
tlsConf = &tls.Config{
|
|
||||||
ServerName: srvNm,
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// These are all filepath(s).
|
|
||||||
for _, k := range []string{
|
|
||||||
TlsUriParamCa,
|
|
||||||
TlsUriParamCert,
|
|
||||||
TlsUriParamKey,
|
|
||||||
} {
|
|
||||||
if _, ok = params[k]; ok {
|
|
||||||
for idx, _ := range params[k] {
|
|
||||||
if err = paths.RealPath(¶ms[k][idx]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CA cert(s).
|
|
||||||
buf.Reset()
|
|
||||||
if _, ok = params[TlsUriParamCa]; ok {
|
|
||||||
rootCAs = x509.NewCertPool()
|
|
||||||
for _, c := range params[TlsUriParamCa] {
|
|
||||||
if b, err = os.ReadFile(c); err != nil {
|
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
|
||||||
err = nil
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
buf.Write(b)
|
|
||||||
}
|
|
||||||
if rootCAs, _, intermediateCAs, err = ParseCA(buf.Bytes()); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if rootCAs, err = x509.SystemCertPool(); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keys. These are done first so we can match to a client certificate.
|
|
||||||
buf.Reset()
|
|
||||||
if _, ok = params[TlsUriParamKey]; ok {
|
|
||||||
for _, k := range params[TlsUriParamKey] {
|
|
||||||
if b, err = os.ReadFile(k); err != nil {
|
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
|
||||||
err = nil
|
|
||||||
continue
|
|
||||||
} else {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
buf.Write(b)
|
|
||||||
}
|
|
||||||
if privKeys, err = ParsePrivateKey(buf.Bytes()); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// (Client) Certificate(s).
|
|
||||||
buf.Reset()
|
|
||||||
if _, ok = params[TlsUriParamCert]; ok {
|
|
||||||
for _, c := range params[TlsUriParamCert] {
|
|
||||||
if b, err = os.ReadFile(c); err != nil {
|
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
|
||||||
err = nil
|
|
||||||
continue
|
|
||||||
} else {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
buf.Write(b)
|
|
||||||
}
|
|
||||||
if tlsCerts, err = ParseLeafCert(buf.Bytes(), privKeys, intermediateCAs...); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hostname (Override).
|
|
||||||
if _, ok = params[TlsUriParamSni]; ok {
|
|
||||||
srvNm = params[TlsUriParamSni][0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Disable Verification.
|
|
||||||
if _, ok = params[TlsUriParamNoVerify]; ok {
|
|
||||||
val = strings.ToLower(params[TlsUriParamNoVerify][0])
|
|
||||||
for _, i := range paramBoolValsTrue {
|
|
||||||
if i == val {
|
|
||||||
allowInvalid = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ciphers.
|
|
||||||
if _, ok = params[TlsUriParamCipher]; ok {
|
|
||||||
ciphers = ParseTlsCiphers(strings.Join(params[TlsUriParamCipher], ","))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Minimum TLS Protocol Version.
|
|
||||||
if _, ok = params[TlsUriParamMinTls]; ok {
|
|
||||||
if minVer, err = ParseTlsVersion(params[TlsUriParamMinTls][0]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Maximum TLS Protocol Version.
|
|
||||||
if _, ok = params[TlsUriParamMaxTls]; ok {
|
|
||||||
if maxVer, err = ParseTlsVersion(params[TlsUriParamMaxTls][0]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Curves.
|
|
||||||
if _, ok = params[TlsUriParamCurve]; ok {
|
|
||||||
curves = ParseTlsCurves(strings.Join(params[TlsUriParamCurve], ","))
|
|
||||||
}
|
|
||||||
|
|
||||||
tlsConf = &tls.Config{
|
|
||||||
Certificates: tlsCerts,
|
|
||||||
RootCAs: rootCAs,
|
|
||||||
ServerName: srvNm,
|
|
||||||
InsecureSkipVerify: allowInvalid,
|
|
||||||
CipherSuites: ciphers,
|
|
||||||
MinVersion: minVer,
|
|
||||||
MaxVersion: maxVer,
|
|
||||||
CurvePreferences: curves,
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseTlsVersion parses string s and attempts to derive a TLS version from it. If none is found, tlsVer will be 0.
|
|
||||||
func ParseTlsVersion(s string) (tlsVer uint16, err error) {
|
|
||||||
|
|
||||||
var nm string
|
|
||||||
var n uint64
|
|
||||||
var i uint16
|
|
||||||
var ok bool
|
|
||||||
|
|
||||||
if n, err = strconv.ParseUint(s, 10, 16); err != nil {
|
|
||||||
if errors.Is(err, strconv.ErrSyntax) {
|
|
||||||
// It's a name; parse below.
|
|
||||||
err = nil
|
|
||||||
} else {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// It's a number.
|
|
||||||
if nm = tls.VersionName(uint16(n)); strings.HasPrefix(nm, "0x") {
|
|
||||||
// ...but invalid.
|
|
||||||
err = ErrBadTlsVer
|
|
||||||
return
|
|
||||||
} else {
|
|
||||||
// Valid (as number). Return it.
|
|
||||||
tlsVer = uint16(n)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we get here, it should be parsed as a version string.
|
|
||||||
s = strings.ToUpper(s)
|
|
||||||
s = strings.ReplaceAll(s, "_", " ")
|
|
||||||
s = strings.ReplaceAll(s, "V", " ")
|
|
||||||
s = strings.TrimSpace(s)
|
|
||||||
if !strings.HasPrefix(s, "SSL") && !strings.HasPrefix(s, "TLS ") {
|
|
||||||
s = "TLS " + s
|
|
||||||
}
|
|
||||||
|
|
||||||
// We build a dynamic map of version names to uint16s (if not already created).
|
|
||||||
if tlsVerNmToUint == nil {
|
|
||||||
tlsVerNmToUint = make(map[string]uint16)
|
|
||||||
for i = MinTlsVer; i <= MaxTlsVer; i++ {
|
|
||||||
if nm = tls.VersionName(i); !strings.HasPrefix(nm, "0x") {
|
|
||||||
tlsVerNmToUint[nm] = i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if i, ok = tlsVerNmToUint[s]; ok {
|
|
||||||
tlsVer = i
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
ParseCA parses PEM bytes and returns an *x509.CertPool in caCerts.
|
|
||||||
|
|
||||||
Concatenated PEM files are supported.
|
|
||||||
|
|
||||||
Any keys found will be filtered out, as will any leaf certificates.
|
|
||||||
|
|
||||||
Any *intermediate* CAs (the certificate is a CA but it is not self-signed) will be returned separate from
|
|
||||||
certPool.
|
|
||||||
|
|
||||||
Ordering from the file is preserved in the returned slices.
|
|
||||||
*/
|
|
||||||
func ParseCA(certRaw []byte) (certPool *x509.CertPool, rootCerts []*x509.Certificate, intermediateCerts []*x509.Certificate, err error) {
|
|
||||||
|
|
||||||
var pemBlocks []*pem.Block
|
|
||||||
var cert *x509.Certificate
|
|
||||||
var certs []*x509.Certificate
|
|
||||||
|
|
||||||
if pemBlocks, err = SplitPem(certRaw); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filter out keys etc. and non-CA certs.
|
|
||||||
for _, b := range pemBlocks {
|
|
||||||
if b.Type != "CERTIFICATE" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if cert, err = x509.ParseCertificate(b.Bytes); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if !cert.IsCA {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
certs = append(certs, cert)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, cert = range certs {
|
|
||||||
if bytes.Equal(cert.RawIssuer, cert.RawSubject) {
|
|
||||||
// It's a root/self-signed.
|
|
||||||
rootCerts = append(rootCerts, cert)
|
|
||||||
} else {
|
|
||||||
// It's an intermediate.
|
|
||||||
intermediateCerts = append(intermediateCerts, cert)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if rootCerts != nil {
|
|
||||||
certPool = x509.NewCertPool()
|
|
||||||
for _, cert = range rootCerts {
|
|
||||||
certPool.AddCert(cert)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
ParseLeafCert parses PEM bytes from a (client) certificate file, iterates over a slice of
|
|
||||||
crypto.PrivateKey (finding one that matches), and returns one (or more) tls.Certificate.
|
|
||||||
|
|
||||||
The key may also be combined with the certificate in the same file.
|
|
||||||
|
|
||||||
If no private key matches or no client cert is found in the file, tlsCerts will be nil/missing
|
|
||||||
that certificate but no error will be returned.
|
|
||||||
This behavior can be avoided by passing a nil slice to keys.
|
|
||||||
|
|
||||||
Any leaf certificates ("server" certificate, as opposed to a signer/issuer) found in the file
|
|
||||||
will be assumed to be the desired one(s).
|
|
||||||
|
|
||||||
Any additional/supplementary intermediates may be provided. Any present in the PEM bytes (certRaw) will be included.
|
|
||||||
|
|
||||||
Any *root* CAs found will be discarded. They should/can be extracted seperately via ParseCA.
|
|
||||||
|
|
||||||
The parsed and paired certificates and keys can be found in each respective tls.Certificate.Leaf and tls.Certificate.PrivateKey.
|
|
||||||
Any certs without a corresponding key will be discarded.
|
|
||||||
*/
|
|
||||||
func ParseLeafCert(certRaw []byte, keys []crypto.PrivateKey, intermediates ...*x509.Certificate) (tlsCerts []tls.Certificate, err error) {
|
|
||||||
|
|
||||||
var pemBlocks []*pem.Block
|
|
||||||
var cert *x509.Certificate
|
|
||||||
var certs []*x509.Certificate
|
|
||||||
var caCerts []*x509.Certificate
|
|
||||||
var parsedKeys []crypto.PrivateKey
|
|
||||||
var isMatched bool
|
|
||||||
var foundKey crypto.PrivateKey
|
|
||||||
var interBytes [][]byte
|
|
||||||
var skipKeyPair bool = keys == nil
|
|
||||||
var parsedKeysBuf *bytes.Buffer = new(bytes.Buffer)
|
|
||||||
|
|
||||||
if pemBlocks, err = SplitPem(certRaw); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, b := range pemBlocks {
|
|
||||||
if strings.Contains(b.Type, "PRIVATE KEY") {
|
|
||||||
parsedKeysBuf.Write(pem.EncodeToMemory(b))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if b.Type != "CERTIFICATE" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if cert, err = x509.ParseCertificate(b.Bytes); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if cert.IsCA {
|
|
||||||
if bytes.Equal(cert.RawIssuer, cert.RawSubject) {
|
|
||||||
caCerts = append(caCerts, cert)
|
|
||||||
} else {
|
|
||||||
intermediates = append(intermediates, cert)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
certs = append(certs, cert)
|
|
||||||
}
|
|
||||||
|
|
||||||
if intermediates != nil && len(intermediates) != 0 {
|
|
||||||
interBytes = make([][]byte, len(intermediates))
|
|
||||||
for _, i := range intermediates {
|
|
||||||
interBytes = append(interBytes, i.Raw)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if parsedKeysBuf.Len() != 0 {
|
|
||||||
if parsedKeys, err = ParsePrivateKey(parsedKeysBuf.Bytes()); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
keys = append(keys, parsedKeys...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Now pair the certs and keys, and combine as a tls.Certificate.
|
|
||||||
for _, cert = range certs {
|
|
||||||
foundKey = nil
|
|
||||||
for _, k := range keys {
|
|
||||||
if isMatched, err = IsMatchedPair(k, cert); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if isMatched {
|
|
||||||
foundKey = k
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if foundKey == nil && !skipKeyPair {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
tlsCerts = append(
|
|
||||||
tlsCerts,
|
|
||||||
tls.Certificate{
|
|
||||||
Certificate: append([][]byte{cert.Raw}, interBytes...),
|
|
||||||
PrivateKey: foundKey,
|
|
||||||
Leaf: cert,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
_ = caCerts
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
ParsePrivateKey parses PEM bytes to a private key. Multiple keys may be concatenated in the same file.
|
|
||||||
|
|
||||||
Any public keys, certificates, etc. found will be discarded.
|
|
||||||
*/
|
|
||||||
func ParsePrivateKey(keyRaw []byte) (keys []crypto.PrivateKey, err error) {
|
|
||||||
|
|
||||||
var privKey crypto.PrivateKey
|
|
||||||
var pemBlocks []*pem.Block
|
|
||||||
|
|
||||||
if pemBlocks, err = SplitPem(keyRaw); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, b := range pemBlocks {
|
|
||||||
if !strings.Contains(b.Type, "PRIVATE KEY") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
switch b.Type {
|
|
||||||
case "RSA PRIVATE KEY": // PKCS#1
|
|
||||||
if privKey, err = x509.ParsePKCS1PrivateKey(b.Bytes); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
keys = append(keys, privKey)
|
|
||||||
case "EC PRIVATE KEY": // SEC 1, ASN.1 DER
|
|
||||||
if privKey, err = x509.ParseECPrivateKey(b.Bytes); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
keys = append(keys, privKey)
|
|
||||||
case "PRIVATE KEY": // PKCS#8
|
|
||||||
if privKey, err = x509.ParsePKCS8PrivateKey(b.Bytes); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
keys = append(keys, privKey)
|
|
||||||
default:
|
|
||||||
err = ErrUnknownKey
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// SplitPem splits a single block of bytes into one (or more) (encoding/)pem.Blocks.
func SplitPem(pemRaw []byte) (blocks []*pem.Block, err error) {

	var blk *pem.Block
	var remaining []byte = pemRaw

	// pem.Decode returns a nil block once no further well-formed block exists.
	for {
		if blk, remaining = pem.Decode(remaining); blk == nil {
			break
		}
		blocks = append(blocks, blk)
	}

	return
}
|
|
@ -1,217 +0,0 @@
|
|||||||
package cryptparse
|
|
||||||
|
|
||||||
import (
|
|
||||||
`bytes`
|
|
||||||
`crypto`
|
|
||||||
`crypto/tls`
|
|
||||||
`crypto/x509`
|
|
||||||
`errors`
|
|
||||||
`fmt`
|
|
||||||
`net/url`
|
|
||||||
`os`
|
|
||||||
`strings`
|
|
||||||
|
|
||||||
`r00t2.io/sysutils/paths`
|
|
||||||
)
|
|
||||||
|
|
||||||
// Normalize ensures that all specified filepaths are absolute, etc.
|
|
||||||
func (t *TlsFlat) Normalize() (err error) {
|
|
||||||
|
|
||||||
if t.Certs != nil {
|
|
||||||
for _, c := range t.Certs {
|
|
||||||
if err = paths.RealPath(&c.CertFile); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if c.KeyFile != nil {
|
|
||||||
if err = paths.RealPath(c.KeyFile); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if t.CaFiles != nil {
|
|
||||||
for idx, _ := range t.CaFiles {
|
|
||||||
if err = paths.RealPath(&t.CaFiles[idx]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
ToTlsConfig returns a tls.Config from a TlsFlat. Note that it will have Normalize called on it.
|
|
||||||
|
|
||||||
Unfortunately it's not possible for this library to do the reverse, as CA certificates are not able to be extracted from an x509.CertPool.
|
|
||||||
*/
|
|
||||||
func (t *TlsFlat) ToTlsConfig() (tlsConf *tls.Config, err error) {
|
|
||||||
|
|
||||||
var b []byte
|
|
||||||
var rootCAs *x509.CertPool
|
|
||||||
var intermediateCAs []*x509.Certificate
|
|
||||||
var privKeys []crypto.PrivateKey
|
|
||||||
var tlsCerts []tls.Certificate
|
|
||||||
var parsedTlsCerts []tls.Certificate
|
|
||||||
var ciphers []uint16
|
|
||||||
var curves []tls.CurveID
|
|
||||||
var minVer uint16
|
|
||||||
var maxVer uint16
|
|
||||||
var buf *bytes.Buffer = new(bytes.Buffer)
|
|
||||||
var srvNm string = t.SniName
|
|
||||||
|
|
||||||
// Normalize any filepaths before validation.
|
|
||||||
if err = t.Normalize(); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// And validate.
|
|
||||||
if err = validate.Struct(t); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// CA cert(s).
|
|
||||||
buf.Reset()
|
|
||||||
if t.CaFiles != nil {
|
|
||||||
rootCAs = x509.NewCertPool()
|
|
||||||
for _, c := range t.CaFiles {
|
|
||||||
if b, err = os.ReadFile(c); err != nil {
|
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
|
||||||
err = nil
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
buf.Write(b)
|
|
||||||
}
|
|
||||||
if rootCAs, _, intermediateCAs, err = ParseCA(buf.Bytes()); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if rootCAs, err = x509.SystemCertPool(); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keys and Certs. They are assumed to be matched.
|
|
||||||
if t.Certs != nil {
|
|
||||||
for _, c := range t.Certs {
|
|
||||||
privKeys = nil
|
|
||||||
if c.KeyFile != nil {
|
|
||||||
if b, err = os.ReadFile(*c.KeyFile); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if privKeys, err = ParsePrivateKey(b); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if b, err = os.ReadFile(c.CertFile); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if parsedTlsCerts, err = ParseLeafCert(b, privKeys, intermediateCAs...); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
tlsCerts = append(tlsCerts, parsedTlsCerts...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ciphers.
|
|
||||||
if t.CipherSuites != nil {
|
|
||||||
ciphers = ParseTlsCiphers(strings.Join(t.CipherSuites, ","))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Minimum TLS Protocol Version.
|
|
||||||
if t.MinTlsProtocol != nil {
|
|
||||||
if minVer, err = ParseTlsVersion(*t.MinTlsProtocol); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Maximum TLS Protocol Version.
|
|
||||||
if t.MaxTlsProtocol != nil {
|
|
||||||
if maxVer, err = ParseTlsVersion(*t.MaxTlsProtocol); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Curves.
|
|
||||||
if t.Curves != nil {
|
|
||||||
curves = ParseTlsCurves(strings.Join(t.Curves, ","))
|
|
||||||
}
|
|
||||||
|
|
||||||
tlsConf = &tls.Config{
|
|
||||||
Certificates: tlsCerts,
|
|
||||||
RootCAs: rootCAs,
|
|
||||||
ServerName: srvNm,
|
|
||||||
InsecureSkipVerify: t.SkipVerify,
|
|
||||||
CipherSuites: ciphers,
|
|
||||||
MinVersion: minVer,
|
|
||||||
MaxVersion: maxVer,
|
|
||||||
CurvePreferences: curves,
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToTlsUri returns a TlsUri from a TlsFlat.
|
|
||||||
func (t *TlsFlat) ToTlsUri() (tlsUri *TlsUri, err error) {
|
|
||||||
|
|
||||||
var u *url.URL
|
|
||||||
|
|
||||||
if u, err = url.Parse(fmt.Sprintf("tls://%v/", t.SniName)); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// CA cert(s).
|
|
||||||
if t.CaFiles != nil {
|
|
||||||
for _, c := range t.CaFiles {
|
|
||||||
u.Query().Add(TlsUriParamCa, c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keys and Certs.
|
|
||||||
if t.Certs != nil {
|
|
||||||
for _, c := range t.Certs {
|
|
||||||
u.Query().Add(TlsUriParamCert, c.CertFile)
|
|
||||||
if c.KeyFile != nil {
|
|
||||||
u.Query().Add(TlsUriParamKey, *c.KeyFile)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Enforce the SNI hostname.
|
|
||||||
u.Query().Add(TlsUriParamSni, t.SniName)
|
|
||||||
|
|
||||||
// Disable Verification.
|
|
||||||
if t.SkipVerify {
|
|
||||||
u.Query().Add(TlsUriParamNoVerify, "1")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ciphers.
|
|
||||||
if t.CipherSuites != nil {
|
|
||||||
for _, c := range t.CipherSuites {
|
|
||||||
u.Query().Add(TlsUriParamCipher, c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Minimum TLS Protocol Version.
|
|
||||||
if t.MinTlsProtocol != nil {
|
|
||||||
u.Query().Add(TlsUriParamMinTls, *t.MinTlsProtocol)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Maximum TLS Protocol Version.
|
|
||||||
if t.MaxTlsProtocol != nil {
|
|
||||||
u.Query().Add(TlsUriParamMaxTls, *t.MaxTlsProtocol)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Curves.
|
|
||||||
if t.Curves != nil {
|
|
||||||
for _, c := range t.Curves {
|
|
||||||
u.Query().Add(TlsUriParamCurve, c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
tlsUri = &TlsUri{
|
|
||||||
URL: u,
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
@ -1,159 +0,0 @@
|
|||||||
package cryptparse
|
|
||||||
|
|
||||||
import (
|
|
||||||
`crypto`
|
|
||||||
`crypto/tls`
|
|
||||||
`net/url`
|
|
||||||
`os`
|
|
||||||
`strings`
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
ToTlsConfig returns a *tls.Config from a TlsUri.
|
|
||||||
|
|
||||||
Unfortunately it's not possible for this library to do the reverse, as CA certificates are not able to be extracted from an x509.CertPool.
|
|
||||||
*/
|
|
||||||
func (t *TlsUri) ToTlsConfig() (cfg *tls.Config, err error) {
|
|
||||||
|
|
||||||
if cfg, err = ParseTlsUri(t.URL); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToTlsFlat returns a *TlsFlat from a TlsUri.
|
|
||||||
func (t *TlsUri) ToTlsFlat() (tlsFlat *TlsFlat, err error) {
|
|
||||||
|
|
||||||
var b []byte
|
|
||||||
var params url.Values
|
|
||||||
var paramMap map[string][]string
|
|
||||||
// These also have maps so they can backmap filenames.
|
|
||||||
var privKeys []crypto.PrivateKey
|
|
||||||
var privKeyMap map[string][]crypto.PrivateKey
|
|
||||||
var tlsCerts []tls.Certificate
|
|
||||||
var tlsCertMap map[string][]tls.Certificate
|
|
||||||
var isMatch bool
|
|
||||||
var fCert *TlsFlatCert
|
|
||||||
var val string
|
|
||||||
var f TlsFlat = TlsFlat{
|
|
||||||
SniName: t.Hostname(),
|
|
||||||
SkipVerify: false,
|
|
||||||
Certs: nil,
|
|
||||||
CaFiles: nil,
|
|
||||||
CipherSuites: nil,
|
|
||||||
MinTlsProtocol: nil,
|
|
||||||
MaxTlsProtocol: nil,
|
|
||||||
Curves: nil,
|
|
||||||
}
|
|
||||||
|
|
||||||
params = t.Query()
|
|
||||||
paramMap = params
|
|
||||||
|
|
||||||
if params == nil {
|
|
||||||
tlsFlat = &f
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// CA cert(s).
|
|
||||||
if t.Query().Has(TlsUriParamCa) {
|
|
||||||
f.CaFiles = append(f.CaFiles, paramMap[TlsUriParamCa]...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keys and Certs. These are done first so we can match to a client certificate.
|
|
||||||
if t.Query().Has(TlsUriParamKey) {
|
|
||||||
privKeyMap = make(map[string][]crypto.PrivateKey)
|
|
||||||
for _, kFile := range paramMap[TlsUriParamKey] {
|
|
||||||
if b, err = os.ReadFile(kFile); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if privKeyMap[kFile], err = ParsePrivateKey(b); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
privKeys = append(privKeys, privKeyMap[kFile]...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if t.Query().Has(TlsUriParamCert) {
|
|
||||||
tlsCertMap = make(map[string][]tls.Certificate)
|
|
||||||
for _, cFile := range paramMap[TlsUriParamCert] {
|
|
||||||
if b, err = os.ReadFile(cFile); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if tlsCertMap[cFile], err = ParseLeafCert(b, privKeys); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
tlsCerts = append(tlsCerts, tlsCertMap[cFile]...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// We then correlate. Whew, lads.
|
|
||||||
for cFile, c := range tlsCertMap {
|
|
||||||
for _, cert := range c {
|
|
||||||
for kFile, k := range privKeyMap {
|
|
||||||
if isMatch, err = IsMatchedPair(k, cert.Leaf); err != nil {
|
|
||||||
return
|
|
||||||
} else if isMatch {
|
|
||||||
fCert = &TlsFlatCert{
|
|
||||||
CertFile: cFile,
|
|
||||||
KeyFile: new(string),
|
|
||||||
}
|
|
||||||
*fCert.KeyFile = kFile
|
|
||||||
f.Certs = append(f.Certs, fCert)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hostname.
|
|
||||||
if t.Query().Has(TlsUriParamSni) {
|
|
||||||
f.SniName = t.Query().Get(TlsUriParamSni)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Disable verification.
|
|
||||||
if t.Query().Has(TlsUriParamNoVerify) {
|
|
||||||
val = strings.ToLower(t.Query().Get(TlsUriParamNoVerify))
|
|
||||||
for _, i := range paramBoolValsTrue {
|
|
||||||
if val == i {
|
|
||||||
f.SkipVerify = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ciphers.
|
|
||||||
if t.Query().Has(TlsUriParamCipher) {
|
|
||||||
f.CipherSuites = params[TlsUriParamCipher]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Minimum TLS Protocol Version.
|
|
||||||
if t.Query().Has(TlsUriParamMinTls) {
|
|
||||||
f.MinTlsProtocol = new(string)
|
|
||||||
*f.MinTlsProtocol = t.Query().Get(TlsUriParamMinTls)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Maximum TLS Protocol Version.
|
|
||||||
if t.Query().Has(TlsUriParamMaxTls) {
|
|
||||||
f.MaxTlsProtocol = new(string)
|
|
||||||
*f.MaxTlsProtocol = t.Query().Get(TlsUriParamMaxTls)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Curves.
|
|
||||||
if t.Query().Has(TlsUriParamCurve) {
|
|
||||||
f.Curves = params[TlsUriParamCurve]
|
|
||||||
}
|
|
||||||
|
|
||||||
tlsFlat = &f
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToURL returns the *url.URL representation of a TlsUri.
|
|
||||||
func (t *TlsUri) ToURL() (u *url.URL) {
|
|
||||||
|
|
||||||
if t == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
u = t.URL
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
@ -1,30 +0,0 @@
|
|||||||
package cryptparse
|
|
||||||
|
|
||||||
import (
|
|
||||||
`encoding/xml`
|
|
||||||
`net/url`
|
|
||||||
)
|
|
||||||
|
|
||||||
// TlsFlat provides an easy structure to marshal/unmarshal a tls.Config from/to a data structure (JSON, XML, etc.).
type TlsFlat struct {
	XMLName xml.Name `xml:"tlsConfig" json:"-" yaml:"-" toml:"-"`
	// SniName is the server name (SNI); required. It seeds tls.Config.ServerName.
	SniName string `json:"sni_name" xml:"sniName,attr" yaml:"SniName" toml:"SniName" required:"true" validate:"required"`
	// SkipVerify maps to tls.Config.InsecureSkipVerify.
	SkipVerify bool `json:"skip_verify,omitempty" xml:"skipVerify,attr,omitempty" yaml:"SkipVerify,omitempty" toml:"SkipVerify,omitempty"`
	// Certs lists leaf certificate files (each with an optional paired key file).
	Certs []*TlsFlatCert `json:"certs,omitempty" xml:"certs>cert,omitempty" yaml:"Certs,omitempty" toml:"Certs,omitempty" validate:"omitempty,dive"`
	// CaFiles lists CA bundle filepaths; if empty, the system cert pool is used (see ToTlsConfig).
	CaFiles []string `json:"ca_files,omitempty" xml:"roots>ca,omitempty" yaml:"CaFiles,omitempty" toml:"CaFiles,omitempty" validate:"omitempty,dive,filepath"`
	// CipherSuites holds cipher names/ids, parsed via ParseTlsCiphers.
	CipherSuites []string `json:"cipher_suites,omitempty" xml:"ciphers,omitempty" yaml:"CipherSuites,omitempty" toml:"CipherSuites,omitempty"`
	// MinTlsProtocol is a version name/number parsed via ParseTlsVersion; nil means unset (0).
	MinTlsProtocol *string `json:"min_tls_protocol,omitempty" xml:"minTlsProtocol,attr,omitempty" yaml:"MinTlsProtocol,omitempty" toml:"MinTlsProtocol,omitempty"`
	// MaxTlsProtocol is a version name/number parsed via ParseTlsVersion; nil means unset (0).
	MaxTlsProtocol *string `json:"max_tls_protocol,omitempty" xml:"maxTlsProtocol,attr,omitempty" yaml:"MaxTlsProtocol,omitempty" toml:"MaxTlsProtocol,omitempty"`
	// Curves holds curve names/ids, parsed via ParseTlsCurves.
	Curves []string `json:"curves,omitempty" xml:"curves>curve,omitempty" yaml:"Curves,omitempty" toml:"Curves,omitempty" validate:"omitempty,dive"`
}
|
|
||||||
|
|
||||||
// TlsFlatCert represents a certificate (and, possibly, paired key).
type TlsFlatCert struct {
	XMLName xml.Name `xml:"cert" json:"-" yaml:"-" toml:"-"`
	// KeyFile is the path to the private key; nil when the key is bundled in CertFile (or absent).
	KeyFile *string `json:"key,omitempty" xml:"key,attr,omitempty" yaml:"Key,omitempty" toml:"Key,omitempty" validate:"omitempty,filepath"`
	// CertFile is the path to the (PEM) certificate; required.
	CertFile string `json:"cert" xml:",chardata" yaml:"Certificate" toml:"Certificate" required:"true" validate:"required,filepath"`
}
|
|
||||||
|
|
||||||
// TlsUri wraps a *url.URL representing a "tls://" URI whose query parameters
// describe a TLS configuration (see ParseTlsUri for the recognized parameters).
type TlsUri struct {
	*url.URL
}
|
|
220
envs/funcs.go
220
envs/funcs.go
@ -1,22 +1,98 @@
|
|||||||
package envs
|
package envs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
`bytes`
|
"bytes"
|
||||||
`errors`
|
"errors"
|
||||||
`fmt`
|
"fmt"
|
||||||
`io/ioutil`
|
"io/ioutil"
|
||||||
`os`
|
"os"
|
||||||
`reflect`
|
"reflect"
|
||||||
`strings`
|
"strings"
|
||||||
`sync`
|
"sync"
|
||||||
|
|
||||||
`r00t2.io/goutils/multierr`
|
"r00t2.io/goutils/multierr"
|
||||||
`r00t2.io/goutils/structutils`
|
"r00t2.io/goutils/structutils"
|
||||||
`r00t2.io/sysutils/errs`
|
"r00t2.io/sysutils/errs"
|
||||||
`r00t2.io/sysutils/internal`
|
"r00t2.io/sysutils/internal"
|
||||||
`r00t2.io/sysutils/paths`
|
"r00t2.io/sysutils/paths"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
/*
DefEnv operates like Python's .get() method on dicts (maps);
if the environment variable specified by key does not exist/is not specified,
then the value specified by fallback will be returned instead
otherwise key's value is returned.
*/
func DefEnv(key, fallback string) (value string) {

	// Note: a variable that is set to "" is considered present and
	// returned as-is; only a truly unset variable yields fallback.
	v, ok := os.LookupEnv(key)
	if !ok {
		v = fallback
	}
	value = v

	return
}
|
||||||
|
|
||||||
|
// DefEnvBlank is like DefEnv but will ADDITIONALLY/ALSO apply fallback if key is *defined/exists but is an empty string*.
func DefEnvBlank(key, fallback string) (value string) {

	// os.Getenv returns "" for both unset and set-but-empty variables,
	// which are exactly the two cases that should yield fallback here.
	if value = os.Getenv(key); value == "" {
		value = fallback
	}

	return
}
|
||||||
|
|
||||||
|
// GetEnvErr returns the value of key if it exists. If it does not exist, err will be an EnvErrNoVal.
|
||||||
|
func GetEnvErr(key string) (value string, err error) {
|
||||||
|
|
||||||
|
var exists bool
|
||||||
|
|
||||||
|
if value, exists = os.LookupEnv(key); !exists {
|
||||||
|
err = &EnvErrNoVal{
|
||||||
|
VarName: key,
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
GetEnvErrNoBlank behaves exactly like GetEnvErr with the
|
||||||
|
additional stipulation that the value must not be empty.
|
||||||
|
|
||||||
|
An error for a value that is non-empty but whitespace only (e.g. VARNM="\t")
|
||||||
|
can be returned if ignoreWhitespace == true.
|
||||||
|
|
||||||
|
(If it is, an EnvErrNoVal will also be returned.)
|
||||||
|
*/
|
||||||
|
func GetEnvErrNoBlank(key string, ignoreWhitespace bool) (value string, err error) {
|
||||||
|
|
||||||
|
var exists bool
|
||||||
|
var e *EnvErrNoVal = &EnvErrNoVal{
|
||||||
|
VarName: key,
|
||||||
|
WasRequiredNonEmpty: true,
|
||||||
|
IgnoreWhitespace: ignoreWhitespace,
|
||||||
|
}
|
||||||
|
|
||||||
|
if value, exists = os.LookupEnv(key); !exists {
|
||||||
|
err = e
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
e.WasFound = true
|
||||||
|
e.WasWhitespace = (strings.TrimSpace(value) == "") && (value != "")
|
||||||
|
if ignoreWhitespace && e.WasWhitespace {
|
||||||
|
err = e
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// GetEnvMap returns a map of all environment variables. All values are strings.
|
// GetEnvMap returns a map of all environment variables. All values are strings.
|
||||||
func GetEnvMap() (envVars map[string]string) {
|
func GetEnvMap() (envVars map[string]string) {
|
||||||
|
|
||||||
@ -28,18 +104,18 @@ func GetEnvMap() (envVars map[string]string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
GetEnvMapNative returns a map of all environment variables, but attempts to "nativize" them.
|
GetEnvMapNative returns a map of all environment variables, but attempts to "nativize" them.
|
||||||
All values are interfaces. It is up to the caller to typeswitch them to proper types.
|
All values are interfaces. It is up to the caller to typeswitch them to proper types.
|
||||||
|
|
||||||
Note that the PATH/Path environment variable (for *Nix and Windows, respectively) will be
|
Note that the PATH/Path environment variable (for *Nix and Windows, respectively) will be
|
||||||
a []string (as per GetPathEnv). No other env vars, even if they contain os.PathListSeparator,
|
a []string (as per GetPathEnv). No other env vars, even if they contain os.PathListSeparator,
|
||||||
will be transformed to a slice or the like.
|
will be transformed to a slice or the like.
|
||||||
If an error occurs during parsing the path env var, it will be rendered as a string.
|
If an error occurs during parsing the path env var, it will be rendered as a string.
|
||||||
|
|
||||||
All number types will attempt to be their 64-bit version (i.e. int64, uint64, float64, etc.).
|
All number types will attempt to be their 64-bit version (i.e. int64, uint64, float64, etc.).
|
||||||
|
|
||||||
If a type cannot be determined for a value, its string form will be used
|
If a type cannot be determined for a value, its string form will be used
|
||||||
(as it would be found in GetEnvMap).
|
(as it would be found in GetEnvMap).
|
||||||
*/
|
*/
|
||||||
func GetEnvMapNative() (envMap map[string]interface{}) {
|
func GetEnvMapNative() (envMap map[string]interface{}) {
|
||||||
|
|
||||||
@ -51,24 +127,24 @@ func GetEnvMapNative() (envMap map[string]interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
GetFirst gets the first instance if populated/set occurrence of varNames.
|
GetFirst gets the first instance if populated/set occurrence of varNames.
|
||||||
|
|
||||||
For example, if you have three potential env vars, FOO, FOOBAR, FOOBARBAZ,
|
For example, if you have three potential env vars, FOO, FOOBAR, FOOBARBAZ,
|
||||||
and want to follow the logic flow of:
|
and want to follow the logic flow of:
|
||||||
|
|
||||||
1.) Check if FOO is set. If not,
|
1.) Check if FOO is set. If not,
|
||||||
2.) Check if FOOBAR is set. If not,
|
2.) Check if FOOBAR is set. If not,
|
||||||
3.) Check if FOOBARBAZ is set.
|
3.) Check if FOOBARBAZ is set.
|
||||||
|
|
||||||
Then this would be specified as:
|
Then this would be specified as:
|
||||||
|
|
||||||
GetFirst([]string{"FOO", "FOOBAR", "FOOBARBAZ"})
|
GetFirst([]string{"FOO", "FOOBAR", "FOOBARBAZ"})
|
||||||
|
|
||||||
If val is "" and ok is true, this means that one of the specified variable names IS
|
If val is "" and ok is true, this means that one of the specified variable names IS
|
||||||
set but is set to an empty value. If ok is false, none of the specified variables
|
set but is set to an empty value. If ok is false, none of the specified variables
|
||||||
are set.
|
are set.
|
||||||
|
|
||||||
It is a thin wrapper around GetFirstWithRef.
|
It is a thin wrapper around GetFirstWithRef.
|
||||||
*/
|
*/
|
||||||
func GetFirst(varNames []string) (val string, ok bool) {
|
func GetFirst(varNames []string) (val string, ok bool) {
|
||||||
|
|
||||||
@ -78,14 +154,14 @@ func GetFirst(varNames []string) (val string, ok bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
GetFirstWithRef behaves exactly like GetFirst, but with an additional returned value, idx,
|
GetFirstWithRef behaves exactly like GetFirst, but with an additional returned value, idx,
|
||||||
which specifies the index in varNames in which a set variable was found. e.g. if:
|
which specifies the index in varNames in which a set variable was found. e.g. if:
|
||||||
|
|
||||||
GetFirstWithRef([]string{"FOO", "FOOBAR", "FOOBAZ"})
|
GetFirstWithRef([]string{"FOO", "FOOBAR", "FOOBAZ"})
|
||||||
|
|
||||||
is called and FOO is not set but FOOBAR is, idx will be 1.
|
is called and FOO is not set but FOOBAR is, idx will be 1.
|
||||||
|
|
||||||
If ok is false, idx will always be -1 and should be ignored.
|
If ok is false, idx will always be -1 and should be ignored.
|
||||||
*/
|
*/
|
||||||
func GetFirstWithRef(varNames []string) (val string, ok bool, idx int) {
|
func GetFirstWithRef(varNames []string) (val string, ok bool, idx int) {
|
||||||
|
|
||||||
@ -120,8 +196,8 @@ func GetPathEnv() (pathList []string, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
GetPidEnvMap will only work on *NIX-like systems with procfs.
|
GetPidEnvMap will only work on *NIX-like systems with procfs.
|
||||||
It gets the environment variables of a given process' PID.
|
It gets the environment variables of a given process' PID.
|
||||||
*/
|
*/
|
||||||
func GetPidEnvMap(pid uint32) (envMap map[string]string, err error) {
|
func GetPidEnvMap(pid uint32) (envMap map[string]string, err error) {
|
||||||
|
|
||||||
@ -131,7 +207,7 @@ func GetPidEnvMap(pid uint32) (envMap map[string]string, err error) {
|
|||||||
var procPath string
|
var procPath string
|
||||||
var exists bool
|
var exists bool
|
||||||
|
|
||||||
envMap = make(map[string]string, 0)
|
envMap = make(map[string]string)
|
||||||
|
|
||||||
procPath = fmt.Sprintf("/proc/%v/environ", pid)
|
procPath = fmt.Sprintf("/proc/%v/environ", pid)
|
||||||
|
|
||||||
@ -159,10 +235,10 @@ func GetPidEnvMap(pid uint32) (envMap map[string]string, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
GetPidEnvMapNative, like GetEnvMapNative, returns a map of all environment variables, but attempts to "nativize" them.
|
GetPidEnvMapNative, like GetEnvMapNative, returns a map of all environment variables, but attempts to "nativize" them.
|
||||||
All values are interfaces. It is up to the caller to typeswitch them to proper types.
|
All values are interfaces. It is up to the caller to typeswitch them to proper types.
|
||||||
|
|
||||||
See the documentation for GetEnvMapNative for details.
|
See the documentation for GetEnvMapNative for details.
|
||||||
*/
|
*/
|
||||||
func GetPidEnvMapNative(pid uint32) (envMap map[string]interface{}, err error) {
|
func GetPidEnvMapNative(pid uint32) (envMap map[string]interface{}, err error) {
|
||||||
|
|
||||||
@ -178,11 +254,11 @@ func GetPidEnvMapNative(pid uint32) (envMap map[string]interface{}, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
HasEnv is much like os.LookupEnv, but only returns a boolean for
|
HasEnv is much like os.LookupEnv, but only returns a boolean for
|
||||||
if the environment variable key exists or not.
|
if the environment variable key exists or not.
|
||||||
|
|
||||||
This is useful anywhere you may need to set a boolean in a func call
|
This is useful anywhere you may need to set a boolean in a func call
|
||||||
depending on the *presence* of an env var or not.
|
depending on the *presence* of an env var or not.
|
||||||
*/
|
*/
|
||||||
func HasEnv(key string) (envIsSet bool) {
|
func HasEnv(key string) (envIsSet bool) {
|
||||||
|
|
||||||
@ -192,28 +268,28 @@ func HasEnv(key string) (envIsSet bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Interpolate takes one of:
|
Interpolate takes one of:
|
||||||
|
|
||||||
- a string (pointer only)
|
- a string (pointer only)
|
||||||
- a struct (pointer only)
|
- a struct (pointer only)
|
||||||
- a map (applied to both keys *and* values)
|
- a map (applied to both keys *and* values)
|
||||||
- a slice
|
- a slice
|
||||||
|
|
||||||
and performs variable substitution on strings from environment variables.
|
and performs variable substitution on strings from environment variables.
|
||||||
|
|
||||||
It supports both UNIX/Linux/POSIX syntax formats (e.g. $VARNAME, ${VARNAME}) and,
|
It supports both UNIX/Linux/POSIX syntax formats (e.g. $VARNAME, ${VARNAME}) and,
|
||||||
if on Windows, it *additionally* supports the EXPAND_SZ format (e.g. %VARNAME%).
|
if on Windows, it *additionally* supports the EXPAND_SZ format (e.g. %VARNAME%).
|
||||||
|
|
||||||
For structs, the tag name used can be changed by setting the StructTagInterpolate
|
For structs, the tag name used can be changed by setting the StructTagInterpolate
|
||||||
variable in this submodule; the default is `envsub`.
|
variable in this submodule; the default is `envsub`.
|
||||||
If the tag value is "-", the field will be skipped.
|
If the tag value is "-", the field will be skipped.
|
||||||
For map fields within structs etc., the default is to apply interpolation to both keys and values.
|
For map fields within structs etc., the default is to apply interpolation to both keys and values.
|
||||||
All other tag value(s) are ignored.
|
All other tag value(s) are ignored.
|
||||||
|
|
||||||
For maps and slices, Interpolate will recurse into values (e.g. [][]string will work as expected).
|
For maps and slices, Interpolate will recurse into values (e.g. [][]string will work as expected).
|
||||||
|
|
||||||
If s is nil, no interpolation will be performed. No error will be returned.
|
If s is nil, no interpolation will be performed. No error will be returned.
|
||||||
If s is not a valid/supported type, no interpolation will be performed. No error will be returned.
|
If s is not a valid/supported type, no interpolation will be performed. No error will be returned.
|
||||||
*/
|
*/
|
||||||
func Interpolate[T any](s T) (err error) {
|
func Interpolate[T any](s T) (err error) {
|
||||||
|
|
||||||
@ -265,16 +341,16 @@ func Interpolate[T any](s T) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
InterpolateString takes (a pointer to) a struct or string and performs variable substitution on it
|
InterpolateString takes (a pointer to) a struct or string and performs variable substitution on it
|
||||||
from environment variables.
|
from environment variables.
|
||||||
|
|
||||||
It supports both UNIX/Linux/POSIX syntax formats (e.g. $VARNAME, ${VARNAME}) and,
|
It supports both UNIX/Linux/POSIX syntax formats (e.g. $VARNAME, ${VARNAME}) and,
|
||||||
if on Windows, it *additionally* supports the EXPAND_SZ format (e.g. %VARNAME%).
|
if on Windows, it *additionally* supports the EXPAND_SZ format (e.g. %VARNAME%).
|
||||||
|
|
||||||
If s is nil, nothing will be done and err will be ErrNilPtr.
|
If s is nil, nothing will be done and err will be ErrNilPtr.
|
||||||
|
|
||||||
This is a standalone function that is much more performant than Interpolate
|
This is a standalone function that is much more performant than Interpolate
|
||||||
at the cost of rigidity.
|
at the cost of rigidity.
|
||||||
*/
|
*/
|
||||||
func InterpolateString(s *string) (err error) {
|
func InterpolateString(s *string) (err error) {
|
||||||
|
|
||||||
|
27
envs/funcs_enverrnoval.go
Normal file
27
envs/funcs_enverrnoval.go
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
package envs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Error conforms to a stdlib error interface.
|
||||||
|
func (e *EnvErrNoVal) Error() (errStr string) {
|
||||||
|
|
||||||
|
var sb *strings.Builder = new(strings.Builder)
|
||||||
|
|
||||||
|
sb.WriteString("the variable '")
|
||||||
|
sb.WriteString(e.VarName)
|
||||||
|
sb.WriteString("' was ")
|
||||||
|
if e.WasFound {
|
||||||
|
sb.WriteString("found")
|
||||||
|
} else {
|
||||||
|
sb.WriteString("not found")
|
||||||
|
}
|
||||||
|
if e.WasRequiredNonEmpty && e.WasFound {
|
||||||
|
sb.WriteString(" but is empty and was required to be non-empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
errStr = sb.String()
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
20
envs/types.go
Normal file
20
envs/types.go
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
package envs
|
||||||
|
|
||||||
|
type (
|
||||||
|
/*
|
||||||
|
EnvErrNoVal is an error containing the variable that does not exist
|
||||||
|
(and information surrounding the errored state).
|
||||||
|
*/
|
||||||
|
EnvErrNoVal struct {
|
||||||
|
// VarName is the variable name/key name originally specified in the function call.
|
||||||
|
VarName string `json:"var" toml:"VariableName" yaml:"Variable Name/Key" xml:"key,attr"`
|
||||||
|
// WasFound is only used for GetEnvErrNoBlank(). It is true if the variable was found/populated.
|
||||||
|
WasFound bool `json:"found" toml:"Found" yaml:"Found" xml:"found,attr"`
|
||||||
|
// WasRequiredNonEmpty indicates that this error was returned in a context where a variable was required to be non-empty (e.g. via GetEnvErrNoBlank()) but was empty.
|
||||||
|
WasRequiredNonEmpty bool `json:"reqd_non_empty" toml:"RequiredNonEmpty" yaml:"Required Non-Empty" xml:"reqNonEmpty,attr"`
|
||||||
|
// IgnoreWhitespace is true if the value was found but its evaluation was done against a whitestripped version.
|
||||||
|
IgnoreWhitespace bool `json:"ignore_ws" toml:"IgnoreWhitespace" yaml:"Ignore Whitespace" xml:"ignoreWhitespace,attr"`
|
||||||
|
// WasWhitespace is true if the value was whitespace-only.
|
||||||
|
WasWhitespace bool `json:"was_ws" toml:"WasWhitespace" yaml:"Was Whitespace Only" xml:"wasWhitespace,attr"`
|
||||||
|
}
|
||||||
|
)
|
@ -1,10 +1,10 @@
|
|||||||
package envs
|
package envs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
`strconv`
|
"strconv"
|
||||||
`strings`
|
"strings"
|
||||||
|
|
||||||
`r00t2.io/sysutils/internal`
|
"r00t2.io/sysutils/internal"
|
||||||
)
|
)
|
||||||
|
|
||||||
// envListToMap splits a []string of env var keypairs to a map.
|
// envListToMap splits a []string of env var keypairs to a map.
|
||||||
@ -13,7 +13,7 @@ func envListToMap(envs []string) (envMap map[string]string) {
|
|||||||
var kv []string
|
var kv []string
|
||||||
var k, v string
|
var k, v string
|
||||||
|
|
||||||
envMap = make(map[string]string, 0)
|
envMap = make(map[string]string)
|
||||||
|
|
||||||
for _, ev := range envs {
|
for _, ev := range envs {
|
||||||
kv = strings.SplitN(ev, "=", 2)
|
kv = strings.SplitN(ev, "=", 2)
|
||||||
@ -35,7 +35,7 @@ func nativizeEnvMap(stringMap map[string]string) (envMap map[string]interface{})
|
|||||||
var pathVar string = internal.GetPathEnvName()
|
var pathVar string = internal.GetPathEnvName()
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
envMap = make(map[string]interface{}, 0)
|
envMap = make(map[string]interface{})
|
||||||
|
|
||||||
for k, v := range stringMap {
|
for k, v := range stringMap {
|
||||||
|
|
||||||
|
@ -3,6 +3,8 @@
|
|||||||
package envs
|
package envs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
"golang.org/x/sys/windows/registry"
|
"golang.org/x/sys/windows/registry"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
3
fsutils/TODO
Normal file
3
fsutils/TODO
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
- XATTRS
|
||||||
|
(see FS_XFLAG_* in fs.h, FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR)
|
||||||
|
- fs label, UUID? (fs.h)
|
@ -1,101 +1,36 @@
|
|||||||
package fsutils
|
package fsutils
|
||||||
|
|
||||||
import (
|
|
||||||
`github.com/g0rbe/go-chattr`
|
|
||||||
)
|
|
||||||
|
|
||||||
// https://github.com/torvalds/linux/blob/master/include/uapi/linux/fs.h
|
|
||||||
const (
|
|
||||||
SecureDelete uint32 = chattr.FS_SECRM_FL // Secure deletion
|
|
||||||
UnDelete = chattr.FS_UNRM_FL // Undelete
|
|
||||||
CompressFile = chattr.FS_COMPR_FL // Compress file
|
|
||||||
SyncUpdatechattr = chattr.FS_SYNC_FL // Synchronous updates
|
|
||||||
Immutable = chattr.FS_IMMUTABLE_FL // Immutable file
|
|
||||||
AppendOnly = chattr.FS_APPEND_FL // Writes to file may only append
|
|
||||||
NoDumpFile = chattr.FS_NODUMP_FL // Do not dump file
|
|
||||||
NoUpdateAtime = chattr.FS_NOATIME_FL // Do not update atime
|
|
||||||
IsDirty = chattr.FS_DIRTY_FL // Nobody knows what this does, lol.
|
|
||||||
CompressedClusters = chattr.FS_COMPRBLK_FL // One or more compressed clusters
|
|
||||||
NoCompress = chattr.FS_NOCOMP_FL // Don't compress
|
|
||||||
EncFile = chattr.FS_ENCRYPT_FL // Encrypted file
|
|
||||||
BtreeFmt = chattr.FS_BTREE_FL // Btree format dir
|
|
||||||
HashIdxDir = chattr.FS_INDEX_FL // Hash-indexed directory
|
|
||||||
AfsDir = chattr.FS_IMAGIC_FL // AFS directory
|
|
||||||
ReservedExt3 = chattr.FS_JOURNAL_DATA_FL // Reserved for ext3
|
|
||||||
NoMergeTail = chattr.FS_NOTAIL_FL // File tail should not be merged
|
|
||||||
DirSync = chattr.FS_DIRSYNC_FL // dirsync behaviour (directories only)
|
|
||||||
DirTop = chattr.FS_TOPDIR_FL // Top of directory hierarchies
|
|
||||||
ReservedExt4a = chattr.FS_HUGE_FILE_FL // Reserved for ext4
|
|
||||||
Extents = chattr.FS_EXTENT_FL // Extents
|
|
||||||
LargeEaInode = chattr.FS_EA_INODE_FL // Inode used for large EA
|
|
||||||
ReservedExt4b = chattr.FS_EOFBLOCKS_FL // Reserved for ext4
|
|
||||||
NoCOWFile = chattr.FS_NOCOW_FL // Do not cow file
|
|
||||||
ReservedExt4c = chattr.FS_INLINE_DATA_FL // Reserved for ext4
|
|
||||||
UseParentProjId = chattr.FS_PROJINHERIT_FL // Create with parents projid
|
|
||||||
ReservedExt2 = chattr.FS_RESERVED_FL // Reserved for ext2 lib
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// AttrNameValueMap contains a mapping of attribute names (as designated by this package) to their flag values.
|
|
||||||
AttrNameValueMap map[string]uint32 = map[string]uint32{
|
|
||||||
"SecureDelete": SecureDelete,
|
|
||||||
"UnDelete": UnDelete,
|
|
||||||
"CompressFile": CompressFile,
|
|
||||||
"SyncUpdatechattr": SyncUpdatechattr,
|
|
||||||
"Immutable": Immutable,
|
|
||||||
"AppendOnly": AppendOnly,
|
|
||||||
"NoDumpFile": NoDumpFile,
|
|
||||||
"NoUpdateAtime": NoUpdateAtime,
|
|
||||||
"IsDirty": IsDirty,
|
|
||||||
"CompressedClusters": CompressedClusters,
|
|
||||||
"NoCompress": NoCompress,
|
|
||||||
"EncFile": EncFile,
|
|
||||||
"BtreeFmt": BtreeFmt,
|
|
||||||
"HashIdxDir": HashIdxDir,
|
|
||||||
"AfsDir": AfsDir,
|
|
||||||
"ReservedExt3": ReservedExt3,
|
|
||||||
"NoMergeTail": NoMergeTail,
|
|
||||||
"DirSync": DirSync,
|
|
||||||
"DirTop": DirTop,
|
|
||||||
"ReservedExt4a": ReservedExt4a,
|
|
||||||
"Extents": Extents,
|
|
||||||
"LargeEaInode": LargeEaInode,
|
|
||||||
"ReservedExt4b": ReservedExt4b,
|
|
||||||
"NoCOWFile": NoCOWFile,
|
|
||||||
"ReservedExt4c": ReservedExt4c,
|
|
||||||
"UseParentProjId": UseParentProjId,
|
|
||||||
"ReservedExt2": ReservedExt2,
|
|
||||||
}
|
|
||||||
/*
|
/*
|
||||||
AttrValueNameMap contains a mapping of attribute flags to their names (as designated by this package).
|
linuxFsAttrsListOrder defines the order the attributes are printed in per e2fsprogs.
|
||||||
Note the oddball here, BtreeFmt and HashIdxDir are actually the same value, so be forewarned.
|
|
||||||
|
See flags_name at https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/e2p/pf.c for order.
|
||||||
|
Up to date as of e2fsprogs v1.47.1, Linux 6.12-rc7.
|
||||||
|
|
||||||
|
The below are the struct field names for easier reflection.
|
||||||
*/
|
*/
|
||||||
AttrValueNameMap map[uint32]string = map[uint32]string{
|
linuxFsAttrsListOrder []string = []string{
|
||||||
SecureDelete: "SecureDelete",
|
"SecureDelete",
|
||||||
UnDelete: "UnDelete",
|
"UnDelete",
|
||||||
CompressFile: "CompressFile",
|
"SyncUpdate",
|
||||||
SyncUpdatechattr: "SyncUpdatechattr",
|
"DirSync",
|
||||||
Immutable: "Immutable",
|
"Immutable",
|
||||||
AppendOnly: "AppendOnly",
|
"AppendOnly",
|
||||||
NoDumpFile: "NoDumpFile",
|
"NoDumpFile",
|
||||||
NoUpdateAtime: "NoUpdateAtime",
|
"NoUpdateAtime",
|
||||||
IsDirty: "IsDirty",
|
"CompressFile",
|
||||||
CompressedClusters: "CompressedClusters",
|
"EncFile",
|
||||||
NoCompress: "NoCompress",
|
"ReservedExt3",
|
||||||
EncFile: "EncFile",
|
"HashIdxDir",
|
||||||
BtreeFmt: "BtreeFmt|HashIdxDir", // Well THIS is silly and seems like an oversight. Both FS_BTREE_FL and FS_INDEX_FL have the same flag. Confirmed in kernel source.
|
"NoMergeTail",
|
||||||
AfsDir: "AfsDir",
|
"DirTop",
|
||||||
ReservedExt3: "ReservedExt3",
|
"Extents",
|
||||||
NoMergeTail: "NoMergeTail",
|
"NoCOWFile",
|
||||||
DirSync: "DirSync",
|
"DAX",
|
||||||
DirTop: "DirTop",
|
"CaseInsensitive",
|
||||||
ReservedExt4a: "ReservedExt4a",
|
"ReservedExt4c",
|
||||||
Extents: "Extents",
|
"UseParentProjId",
|
||||||
LargeEaInode: "LargeEaInode",
|
"VerityProtected",
|
||||||
ReservedExt4b: "ReservedExt4b",
|
"NoCompress",
|
||||||
NoCOWFile: "NoCOWFile",
|
|
||||||
ReservedExt4c: "ReservedExt4c",
|
|
||||||
UseParentProjId: "UseParentProjId",
|
|
||||||
ReservedExt2: "ReservedExt2",
|
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
127
fsutils/consts_lin.go
Normal file
127
fsutils/consts_lin.go
Normal file
@ -0,0 +1,127 @@
|
|||||||
|
package fsutils
|
||||||
|
|
||||||
|
/*
|
||||||
|
https://github.com/torvalds/linux/blob/master/include/uapi/linux/fs.h "Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)"
|
||||||
|
Up to date as of Linux 6.12-rc7.
|
||||||
|
*/
|
||||||
|
const (
|
||||||
|
SecureDelete fsAttr = 1 << iota // Secure deletion
|
||||||
|
UnDelete // Undelete
|
||||||
|
CompressFile // Compress file
|
||||||
|
SyncUpdate // Synchronous updates
|
||||||
|
Immutable // Immutable file
|
||||||
|
AppendOnly // Writes to file may only append
|
||||||
|
NoDumpFile // Do not dump file
|
||||||
|
NoUpdateAtime // Do not update atime
|
||||||
|
IsDirty // Nobody knows what this does, lol.
|
||||||
|
CompressedClusters // One or more compressed clusters
|
||||||
|
NoCompress // Don't compress
|
||||||
|
EncFile // Encrypted file
|
||||||
|
BtreeFmt // Btree format dir
|
||||||
|
AfsDir // AFS directory
|
||||||
|
ReservedExt3 // Reserved for ext3
|
||||||
|
NoMergeTail // File tail should not be merged
|
||||||
|
DirSync // dirsync behaviour (directories only)
|
||||||
|
DirTop // Top of directory hierarchies
|
||||||
|
ReservedExt4a // Reserved for ext4
|
||||||
|
Extents // Extents
|
||||||
|
VerityProtected // Verity-protected inode
|
||||||
|
LargeEaInode // Inode used for large EA
|
||||||
|
ReservedExt4b // Reserved for ext4
|
||||||
|
NoCOWFile // Do not cow file
|
||||||
|
_ // (Unused)
|
||||||
|
DAX // Inode is DAX
|
||||||
|
_ // (Unused)
|
||||||
|
_ // (Unused)
|
||||||
|
ReservedExt4c // Reserved for ext4
|
||||||
|
UseParentProjId // Create with parents projid
|
||||||
|
CaseInsensitive // Folder is case-insensitive
|
||||||
|
ReservedExt2 // Reserved for ext2 lib
|
||||||
|
)
|
||||||
|
|
||||||
|
// These are the same value. For some reason.
|
||||||
|
const (
|
||||||
|
HashIdxDir fsAttr = BtreeFmt // Hash-indexed directory
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// AttrNameValueMap contains a mapping of attribute names (as designated by this package) to their flag values.
|
||||||
|
AttrNameValueMap map[string]fsAttr = map[string]fsAttr{
|
||||||
|
"SecureDelete": SecureDelete,
|
||||||
|
"UnDelete": UnDelete,
|
||||||
|
"CompressFile": CompressFile,
|
||||||
|
"SyncUpdate": SyncUpdate,
|
||||||
|
"Immutable": Immutable,
|
||||||
|
"AppendOnly": AppendOnly,
|
||||||
|
"NoDumpFile": NoDumpFile,
|
||||||
|
"NoUpdateAtime": NoUpdateAtime,
|
||||||
|
"IsDirty": IsDirty,
|
||||||
|
"CompressedClusters": CompressedClusters,
|
||||||
|
"NoCompress": NoCompress,
|
||||||
|
"EncFile": EncFile,
|
||||||
|
"BtreeFmt": BtreeFmt,
|
||||||
|
"HashIdxDir": HashIdxDir,
|
||||||
|
"AfsDir": AfsDir,
|
||||||
|
"ReservedExt3": ReservedExt3,
|
||||||
|
"NoMergeTail": NoMergeTail,
|
||||||
|
"DirSync": DirSync,
|
||||||
|
"DirTop": DirTop,
|
||||||
|
"ReservedExt4a": ReservedExt4a,
|
||||||
|
"Extents": Extents,
|
||||||
|
"VerityProtected": VerityProtected,
|
||||||
|
"LargeEaInode": LargeEaInode,
|
||||||
|
"ReservedExt4b": ReservedExt4b,
|
||||||
|
"NoCOWFile": NoCOWFile,
|
||||||
|
"DAX": DAX,
|
||||||
|
"ReservedExt4c": ReservedExt4c,
|
||||||
|
"UseParentProjId": UseParentProjId,
|
||||||
|
"CaseInsensitive": CaseInsensitive,
|
||||||
|
"ReservedExt2": ReservedExt2,
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
AttrValueNameMap contains a mapping of attribute flags to their names (as designated by this package).
|
||||||
|
Note the oddball here, BtreeFmt and HashIdxDir are actually the same value, so their string value is unpredictable.
|
||||||
|
*/
|
||||||
|
AttrValueNameMap map[fsAttr]string = invertMap(AttrNameValueMap)
|
||||||
|
|
||||||
|
// KernelNameValueMap allows lookups using the symbol name as used in the Linux kernel source.
|
||||||
|
KernelNameValueMap map[string]fsAttr = map[string]fsAttr{
|
||||||
|
"FS_SECRM_FL": SecureDelete,
|
||||||
|
"FS_UNRM_FL": UnDelete,
|
||||||
|
"FS_COMPR_FL": CompressFile,
|
||||||
|
"FS_SYNC_FL": SyncUpdate,
|
||||||
|
"FS_IMMUTABLE_FL": Immutable,
|
||||||
|
"FS_APPEND_FL": AppendOnly,
|
||||||
|
"FS_NODUMP_FL": NoDumpFile,
|
||||||
|
"FS_NOATIME_FL": NoUpdateAtime,
|
||||||
|
"FS_DIRTY_FL": IsDirty,
|
||||||
|
"FS_COMPRBLK_FL": CompressedClusters,
|
||||||
|
"FS_NOCOMP_FL": NoCompress,
|
||||||
|
"FS_ENCRYPT_FL": EncFile,
|
||||||
|
"FS_BTREE_FL": BtreeFmt,
|
||||||
|
"FS_INDEX_FL": HashIdxDir,
|
||||||
|
"FS_IMAGIC_FL": AfsDir,
|
||||||
|
"FS_JOURNAL_DATA_FL": ReservedExt3,
|
||||||
|
"FS_NOTAIL_FL": NoMergeTail,
|
||||||
|
"FS_DIRSYNC_FL": DirSync,
|
||||||
|
"FS_TOPDIR_FL": DirTop,
|
||||||
|
"FS_HUGE_FILE_FL": ReservedExt4a,
|
||||||
|
"FS_EXTENT_FL": Extents,
|
||||||
|
"FS_VERITY_FL": VerityProtected,
|
||||||
|
"FS_EA_INODE_FL": LargeEaInode,
|
||||||
|
"FS_EOFBLOCKS_FL": ReservedExt4b,
|
||||||
|
"FS_NOCOW_FL": NoCOWFile,
|
||||||
|
"FS_DAX_FL": DAX,
|
||||||
|
"FS_INLINE_DATA_FL": ReservedExt4c,
|
||||||
|
"FS_PROJINHERIT_FL": UseParentProjId,
|
||||||
|
"FS_CASEFOLD_FL": CaseInsensitive,
|
||||||
|
"FS_RESERVED_FL": ReservedExt2,
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
KernelValueNameMap contains a mapping of attribute flags to their kernel source symbol name.
|
||||||
|
Note the oddball here, BtreeFmt and HashIdxDir are actually the same value, so their string value is unpredictable.
|
||||||
|
*/
|
||||||
|
KernelValueNameMap map[fsAttr]string = invertMap(KernelNameValueMap)
|
||||||
|
)
|
7
fsutils/doc.go
Normal file
7
fsutils/doc.go
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
/*
|
||||||
|
fsutils is a collection of filesystem-related functions, types, etc.
|
||||||
|
|
||||||
|
Currently it's only a (fixed/actually working) reimplementation of github.com/g0rbe/go-chattr.
|
||||||
|
(Note to library maintainers, if someone reports an integer overflow and even tells you how to fix it, you should probably fix it.)
|
||||||
|
*/
|
||||||
|
package fsutils
|
11
fsutils/errs.go
Normal file
11
fsutils/errs.go
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
package fsutils
|
||||||
|
|
||||||
|
import (
|
||||||
|
`syscall`
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Yes, I know. "Why ENOTTY?" I don't know, ask Linus.
|
||||||
|
// If you see "inappropriate ioctl for device", it's this'un.
|
||||||
|
ErrFsAttrsUnsupported error = syscall.ENOTTY
|
||||||
|
)
|
@ -1,44 +1,16 @@
|
|||||||
package fsutils
|
package fsutils
|
||||||
|
|
||||||
import (
|
// invertMap returns some handy consts remapping for easier lookups.
|
||||||
`os`
|
func invertMap(origMap map[string]fsAttr) (newMap map[fsAttr]string) {
|
||||||
`reflect`
|
|
||||||
|
|
||||||
`github.com/g0rbe/go-chattr`
|
if origMap == nil {
|
||||||
`r00t2.io/sysutils/paths`
|
|
||||||
)
|
|
||||||
|
|
||||||
func GetAttrs(path string) (attrs *FsAttrs, err error) {
|
|
||||||
|
|
||||||
var f *os.File
|
|
||||||
var evalAttrs FsAttrs
|
|
||||||
var attrVal uint32
|
|
||||||
var reflectVal reflect.Value
|
|
||||||
var field reflect.Value
|
|
||||||
var myPath string = path
|
|
||||||
|
|
||||||
if err = paths.RealPath(&myPath); err != nil {
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
newMap = make(map[fsAttr]string)
|
||||||
|
|
||||||
if f, err = os.Open(myPath); err != nil {
|
for k, v := range origMap {
|
||||||
return
|
newMap[v] = k
|
||||||
}
|
}
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
reflectVal = reflect.ValueOf(&evalAttrs).Elem()
|
|
||||||
|
|
||||||
if attrVal, err = chattr.GetAttrs(f); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for attrNm, attrInt := range AttrNameValueMap {
|
|
||||||
field = reflectVal.FieldByName(attrNm)
|
|
||||||
field.SetBool((attrVal & attrInt) != 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
attrs = new(FsAttrs)
|
|
||||||
*attrs = evalAttrs
|
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -1,43 +1,96 @@
|
|||||||
package fsutils
|
package fsutils
|
||||||
|
|
||||||
import (
|
import (
|
||||||
`os`
|
|
||||||
`reflect`
|
`reflect`
|
||||||
|
`strings`
|
||||||
`github.com/g0rbe/go-chattr`
|
|
||||||
`r00t2.io/sysutils/paths`
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func (f *FsAttrs) Apply(path string) (err error) {
|
/*
|
||||||
|
String returns a string representation (comparable to lsattr(1)) of an FsAttrs.
|
||||||
|
|
||||||
var file *os.File
|
Not all flags are represented, as this aims for compatibility with e2fsprogs/lsattr output.
|
||||||
var reflectVal reflect.Value
|
*/
|
||||||
|
func (f *FsAttrs) String() (s string) {
|
||||||
|
|
||||||
|
// Flags have their short name printed if set, otherwise a '-' placeholder is used.
|
||||||
|
// https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/e2p/pf.c
|
||||||
|
|
||||||
|
var refType reflect.Type
|
||||||
|
var refVal reflect.Value
|
||||||
|
var refField reflect.StructField
|
||||||
var fieldVal reflect.Value
|
var fieldVal reflect.Value
|
||||||
|
var tagVal string
|
||||||
|
var sb strings.Builder
|
||||||
|
|
||||||
var myPath string = path
|
if f == nil {
|
||||||
|
s = strings.Repeat("-", len(linuxFsAttrsListOrder))
|
||||||
if err = paths.RealPath(&myPath); err != nil {
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if file, err = os.Open(myPath); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
reflectVal = reflect.ValueOf(*f)
|
refVal = reflect.ValueOf(*f)
|
||||||
|
refType = refVal.Type()
|
||||||
for attrNm, attrVal := range AttrNameValueMap {
|
for _, fn := range linuxFsAttrsListOrder {
|
||||||
fieldVal = reflectVal.FieldByName(attrNm)
|
refField, _ = refType.FieldByName(fn)
|
||||||
|
tagVal = refField.Tag.Get("fsAttrShort")
|
||||||
|
if tagVal == "" || tagVal == "-" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fieldVal = refVal.FieldByName(fn)
|
||||||
if fieldVal.Bool() {
|
if fieldVal.Bool() {
|
||||||
if err = chattr.SetAttr(file, attrVal); err != nil {
|
sb.WriteString(tagVal)
|
||||||
return
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
if err = chattr.UnsetAttr(file, attrVal); err != nil {
|
sb.WriteString("-")
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
s = sb.String()
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
StringLong returns a more extensive/"human-friendly" representation (comparable to lsattr(1) wiih -l) of an Fsattrs.
|
||||||
|
|
||||||
|
Not all flags are represented, as this aims for compatibility with e2fsprogs/lsattr output.
|
||||||
|
*/
|
||||||
|
func (f *FsAttrs) StringLong() (s string) {
|
||||||
|
|
||||||
|
// The long names are separated via a commma then a space.
|
||||||
|
// If no attrs are set, the string "---" is used.
|
||||||
|
// https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/e2p/pf.c
|
||||||
|
|
||||||
|
var refType reflect.Type
|
||||||
|
var refVal reflect.Value
|
||||||
|
var refField reflect.StructField
|
||||||
|
var fieldVal reflect.Value
|
||||||
|
var tagVal string
|
||||||
|
var out []string
|
||||||
|
|
||||||
|
if f == nil {
|
||||||
|
s = strings.Repeat("-", 3)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
refVal = reflect.ValueOf(*f)
|
||||||
|
refType = refVal.Type()
|
||||||
|
for _, fn := range linuxFsAttrsListOrder {
|
||||||
|
refField, _ = refType.FieldByName(fn)
|
||||||
|
tagVal = refField.Tag.Get("fsAttrLong")
|
||||||
|
if tagVal == "" || tagVal == "-" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fieldVal = refVal.FieldByName(fn)
|
||||||
|
if fieldVal.Bool() {
|
||||||
|
out = append(out, tagVal)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if out == nil || len(out) == 0 {
|
||||||
|
s = strings.Repeat("-", 3)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s = strings.Join(out, ", ")
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
46
fsutils/funcs_fsattrs_linux.go
Normal file
46
fsutils/funcs_fsattrs_linux.go
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
|
package fsutils
|
||||||
|
|
||||||
|
import (
|
||||||
|
`os`
|
||||||
|
`reflect`
|
||||||
|
|
||||||
|
`r00t2.io/sysutils/paths`
|
||||||
|
)
|
||||||
|
|
||||||
|
func (f *FsAttrs) Apply(path string) (err error) {
|
||||||
|
|
||||||
|
var file *os.File
|
||||||
|
var reflectVal reflect.Value
|
||||||
|
var fieldVal reflect.Value
|
||||||
|
|
||||||
|
if f == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = paths.RealPath(&path); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if file, err = os.Open(path); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
reflectVal = reflect.ValueOf(*f)
|
||||||
|
|
||||||
|
for attrNm, attrVal := range AttrNameValueMap {
|
||||||
|
fieldVal = reflectVal.FieldByName(attrNm)
|
||||||
|
if fieldVal.Bool() {
|
||||||
|
if err = setAttrs(file, attrVal); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if err = unsetAttrs(file, attrVal); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
135
fsutils/funcs_linux.go
Normal file
135
fsutils/funcs_linux.go
Normal file
@ -0,0 +1,135 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
|
package fsutils
|
||||||
|
|
||||||
|
import (
|
||||||
|
`os`
|
||||||
|
`reflect`
|
||||||
|
`unsafe`
|
||||||
|
|
||||||
|
`golang.org/x/sys/unix`
|
||||||
|
`r00t2.io/goutils/bitmask`
|
||||||
|
`r00t2.io/sysutils/paths`
|
||||||
|
)
|
||||||
|
|
||||||
|
func GetAttrs(path string) (attrs *FsAttrs, err error) {
|
||||||
|
|
||||||
|
var f *os.File
|
||||||
|
var evalAttrs FsAttrs
|
||||||
|
var attrVal fsAttr
|
||||||
|
var attrValBit bitmask.MaskBit
|
||||||
|
var reflectVal reflect.Value
|
||||||
|
var field reflect.Value
|
||||||
|
var myPath string = path
|
||||||
|
|
||||||
|
if err = paths.RealPath(&myPath); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if f, err = os.Open(myPath); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
reflectVal = reflect.ValueOf(&evalAttrs).Elem()
|
||||||
|
|
||||||
|
if attrVal, err = getAttrs(f); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
attrValBit = bitmask.MaskBit(attrVal)
|
||||||
|
|
||||||
|
for attrNm, attrInt := range AttrNameValueMap {
|
||||||
|
field = reflectVal.FieldByName(attrNm)
|
||||||
|
field.SetBool(attrValBit.HasFlag(bitmask.MaskBit(attrInt)))
|
||||||
|
}
|
||||||
|
|
||||||
|
attrs = new(FsAttrs)
|
||||||
|
*attrs = evalAttrs
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// getAttrs is the unexported low-level syscall to get attributes.
|
||||||
|
func getAttrs(f *os.File) (attrVal fsAttr, err error) {
|
||||||
|
|
||||||
|
var u uint
|
||||||
|
var curFlags int
|
||||||
|
// var errNo syscall.Errno
|
||||||
|
|
||||||
|
/*
|
||||||
|
if _, _, errNo = unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.FS_IOC_GETFLAGS, uintptr(unsafe.Pointer(&curFlags))); errNo != 0 {
|
||||||
|
err = os.NewSyscallError("ioctl: FS_IOC_GETFLAGS", errNo)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
if curFlags, err = unix.IoctlGetInt(int(f.Fd()), unix.FS_IOC_GETFLAGS); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
u = uint(curFlags)
|
||||||
|
|
||||||
|
attrVal = fsAttr(u)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// setAttrs is the unexported low-level syscall to set attributes. attrs may be OR'd.
|
||||||
|
func setAttrs(f *os.File, attrs fsAttr) (err error) {
|
||||||
|
|
||||||
|
var curAttrs fsAttr
|
||||||
|
var ab bitmask.MaskBit
|
||||||
|
var errNo unix.Errno
|
||||||
|
var val uint
|
||||||
|
|
||||||
|
if curAttrs, err = getAttrs(f); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ab = bitmask.MaskBit(curAttrs)
|
||||||
|
|
||||||
|
if ab.HasFlag(bitmask.MaskBit(attrs)) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ab.AddFlag(bitmask.MaskBit(attrs))
|
||||||
|
|
||||||
|
val = ab.Value()
|
||||||
|
|
||||||
|
/*
|
||||||
|
if err = unix.IoctlSetInt(int(f.Fd()), unix.FS_IOC_SETFLAGS, int(ab.Value())); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
if _, _, errNo = unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.FS_IOC_SETFLAGS, uintptr(unsafe.Pointer(&val))); errNo != 0 {
|
||||||
|
err = os.NewSyscallError("ioctl: SYS_IOCTL", errNo)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// unsetAttrs is the unexported low-level syscall to remove attributes. attrs may be OR'd.
|
||||||
|
func unsetAttrs(f *os.File, attrs fsAttr) (err error) {
|
||||||
|
|
||||||
|
var curAttrs fsAttr
|
||||||
|
var ab bitmask.MaskBit
|
||||||
|
|
||||||
|
if curAttrs, err = getAttrs(f); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ab = bitmask.MaskBit(curAttrs)
|
||||||
|
|
||||||
|
// TODO: Should this be IsOneOf instad of HasFlag?
|
||||||
|
if !ab.HasFlag(bitmask.MaskBit(attrs)) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ab.ClearFlag(bitmask.MaskBit(attrs))
|
||||||
|
|
||||||
|
/*
|
||||||
|
if err = unix.IoctlSetInt(int(f.Fd()), unix.FS_IOC_SETFLAGS, int(ab.Value())); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
@ -1,3 +1,5 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
package fsutils
|
package fsutils
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@ -7,12 +9,13 @@ import (
|
|||||||
`os/user`
|
`os/user`
|
||||||
`testing`
|
`testing`
|
||||||
|
|
||||||
|
`github.com/davecgh/go-spew/spew`
|
||||||
`r00t2.io/sysutils/paths`
|
`r00t2.io/sysutils/paths`
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
testFilename string = "testfile"
|
testFilename string = "testfile"
|
||||||
testErrBadUser error = errors.New("test must be run as root, on Linux")
|
testErrBadUser error = errors.New("test must be run as root")
|
||||||
)
|
)
|
||||||
|
|
||||||
func testChkUser() (err error) {
|
func testChkUser() (err error) {
|
||||||
@ -36,12 +39,18 @@ func TestSetAttrs(t *testing.T) {
|
|||||||
if attrs, err = GetAttrs(testFilename); err != nil {
|
if attrs, err = GetAttrs(testFilename); err != nil {
|
||||||
t.Fatalf("Failed to get attrs for %v: %v", testFilename, err)
|
t.Fatalf("Failed to get attrs for %v: %v", testFilename, err)
|
||||||
}
|
}
|
||||||
t.Logf("Attrs for %v:\n%#v", testFilename, attrs)
|
t.Logf("Attrs for %v (before):\n%s", testFilename, spew.Sdump(attrs))
|
||||||
attrs.CompressFile = true
|
attrs.CompressFile = true
|
||||||
|
attrs.SyncUpdate = true
|
||||||
|
attrs.SecureDelete = true
|
||||||
if err = attrs.Apply(testFilename); err != nil {
|
if err = attrs.Apply(testFilename); err != nil {
|
||||||
t.Fatalf("Failed to apply attrs to %v: %v", testFilename, err)
|
t.Fatalf("Failed to apply attrs to %v: %v", testFilename, err)
|
||||||
}
|
}
|
||||||
t.Logf("Applied new attrs to %v:\n%#v", testFilename, attrs)
|
t.Logf("Applied new attrs to %v:\n%#v", testFilename, attrs)
|
||||||
|
if attrs, err = GetAttrs(testFilename); err != nil {
|
||||||
|
t.Fatalf("Failed to get attrs for %v: %v", testFilename, err)
|
||||||
|
}
|
||||||
|
t.Logf("Attrs for %v (after):\n%s", testFilename, spew.Sdump(attrs))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMain(t *testing.M) {
|
func TestMain(t *testing.M) {
|
@ -1,32 +1,44 @@
|
|||||||
package fsutils
|
package fsutils
|
||||||
|
|
||||||
// FsAttrs is a convenience struct around github.com/g0rbe/go-chattr.
|
import (
|
||||||
|
`r00t2.io/goutils/bitmask`
|
||||||
|
)
|
||||||
|
|
||||||
|
type fsAttr bitmask.MaskBit
|
||||||
|
|
||||||
|
/*
|
||||||
|
FsAttrs is a struct representation of filesystem attributes on Linux.
|
||||||
|
Up to date as of Linux 6.12-rc7.
|
||||||
|
*/
|
||||||
type FsAttrs struct {
|
type FsAttrs struct {
|
||||||
SecureDelete bool
|
SecureDelete bool `fsAttrShort:"s" fsAttrLong:"Secure_Deletion" fsAttrKern:"FS_SECRM_FL" json:"secure_delete" toml:"SecureDelete" yaml:"Secure Delete" xml:"secureDelete,attr"`
|
||||||
UnDelete bool
|
UnDelete bool `fsAttrShort:"u" fsAttrLong:"Undelete" fsAttrKern:"FS_UNRM_FL" json:"undelete" toml:"Undelete" yaml:"Undelete" xml:"undelete,attr"`
|
||||||
CompressFile bool
|
CompressFile bool `fsAttrShort:"c" fsAttrLong:"Compression_Requested" fsAttrKern:"FS_COMPR_FL" json:"compress" toml:"Compress" yaml:"Compress" xml:"compress,attr"`
|
||||||
SyncUpdatechattr bool
|
SyncUpdate bool `fsAttrShort:"S" fsAttrLong:"Synchronous_Updates" fsAttrKern:"FS_SYNC_FL" json:"sync" toml:"SyncUpdate" yaml:"Synchronized Update" xml:"syncUpdate,attr"`
|
||||||
Immutable bool
|
Immutable bool `fsAttrShort:"i" fsAttrLong:"Immutable" fsAttrKern:"FS_IMMUTABLE_FL" json:"immutable" toml:"Immutable" yaml:"Immutable" xml:"immutable,attr"`
|
||||||
AppendOnly bool
|
AppendOnly bool `fsAttrShort:"a" fsAttrLong:"Append_Only" fsAttrKern:"FS_APPEND_FL" json:"append_only" toml:"AppendOnly" yaml:"Append Only" xml:"appendOnly,attr"`
|
||||||
NoDumpFile bool
|
NoDumpFile bool `fsAttrShort:"d" fsAttrLong:"No_Dump" fsAttrKern:"FS_NODUMP_FL" json:"no_dump" toml:"NoDump" yaml:"Disable Dumping" xml:"noDump,attr"`
|
||||||
NoUpdateAtime bool
|
NoUpdateAtime bool `fsAttrShort:"A" fsAttrLong:"No_Atime" fsAttrKern:"FS_NOATIME_FL" json:"no_atime" toml:"DisableAtime" yaml:"Disable Atime Updating" xml:"noAtime,attr"`
|
||||||
IsDirty bool
|
IsDirty bool `fsAttrShort:"-" fsAttrLong:"-" fsAttrKern:"FS_DIRTY_FL" json:"dirty" toml:"Dirty" yaml:"Dirty" xml:"dirty,attr"`
|
||||||
CompressedClusters bool
|
CompressedClusters bool `fsAttrShort:"-" fsAttrLong:"-" fsAttrKern:"FS_COMPRBLK_FL" json:"compress_clst" toml:"CompressedClusters" yaml:"Compressed Clusters" xml:"compressClst,attr"`
|
||||||
NoCompress bool
|
NoCompress bool `fsAttrShort:"m" fsAttrLong:"Dont_Compress" fsAttrKern:"FS_NOCOMP_FL" json:"no_compress" toml:"DisableCompression" yaml:"Disable Compression" xml:"noCompress,attr"`
|
||||||
EncFile bool
|
EncFile bool `fsAttrShort:"E" fsAttrLong:"Encrypted" fsAttrKern:"FS_ENCRYPT_FL" json:"enc" toml:"Encrypted" yaml:"Encrypted" xml:"enc,attr"`
|
||||||
BtreeFmt bool
|
BtreeFmt bool `fsAttrShort:"-" fsAttrLong:"-" fsAttrKern:"FS_BTREE_FL" json:"btree" toml:"Btree" yaml:"Btree" xml:"btree,attr"`
|
||||||
HashIdxDir bool
|
HashIdxDir bool `fsAttrShort:"I" fsAttrLong:"Indexed_directory" fsAttrKern:"FS_INDEX_FL" json:"idx_dir" toml:"IdxDir" yaml:"Indexed Directory" xml:"idxDir,attr"`
|
||||||
AfsDir bool
|
AfsDir bool `fsAttrShort:"-" fsAttrLong:"-" fsAttrKern:"FS_IMAGIC_FL" json:"afs" toml:"AFS" yaml:"AFS" xml:"afs,attr"`
|
||||||
ReservedExt3 bool
|
ReservedExt3 bool `fsAttrShort:"j" fsAttrLong:"Journaled_Data" fsAttrKern:"FS_JOURNAL_DATA_FL" json:"res_ext3" toml:"ReservedExt3" yaml:"Reserved Ext3" xml:"resExt3,attr"`
|
||||||
NoMergeTail bool
|
NoMergeTail bool `fsAttrShort:"t" fsAttrLong:"No_Tailmerging" fsAttrKern:"FS_NOTAIL_FL" json:"no_merge_tail" toml:"DisableTailmerging" yaml:"Disable Tailmerging" xml:"noMergeTail,attr"`
|
||||||
DirSync bool
|
DirSync bool `fsAttrShort:"D" fsAttrLong:"Synchronous_Directory_Updates" fsAttrKern:"FS_DIRSYNC_FL" json:"dir_sync" toml:"DirSync" yaml:"Synchronized Directory Updates" xml:"dirSync,attr"`
|
||||||
DirTop bool
|
DirTop bool `fsAttrShort:"T" fsAttrLong:"Top_of_Directory_Hierarchies" fsAttrKern:"FS_TOPDIR_FL" json:"dir_top" toml:"DirTop" yaml:"Top of Directory Hierarchies" xml:"dirTop,attr"`
|
||||||
ReservedExt4a bool
|
ReservedExt4a bool `fsAttrShort:"-" fsAttrLong:"-" fsAttrKern:"FS_HUGE_FILE_FL" json:"res_ext4a" toml:"ReservedExt4A" yaml:"Reserved Ext4 A" xml:"resExt4a,attr"`
|
||||||
Extents bool
|
Extents bool `fsAttrShort:"e" fsAttrLong:"Extents" fsAttrKern:"FS_EXTENT_FL" json:"extents" toml:"Extents" yaml:"Extents" xml:"extents,attr"`
|
||||||
LargeEaInode bool
|
VerityProtected bool `fsAttrShort:"V" fsAttrLong:"Verity" fsAttrKern:"FS_VERITY_FL" json:"verity" toml:"Verity" yaml:"Verity Protected" xml:"verity,attr"`
|
||||||
ReservedExt4b bool
|
LargeEaInode bool `fsAttrShort:"-" fsAttrLong:"-" fsAttrKern:"FS_EA_INODE_FL" json:"ea" toml:"EAInode" yaml:"EA Inode" xml:"ea,attr"`
|
||||||
NoCOWFile bool
|
ReservedExt4b bool `fsAttrShort:"-" fsAttrLong:"-" fsAttrKern:"FS_EOFBLOCKS_FL" json:"res_ext4b" toml:"ReservedExt4B" yaml:"Reserved Ext4 B" xml:"resExt4b,attr"`
|
||||||
ReservedExt4c bool
|
NoCOWFile bool `fsAttrShort:"C" fsAttrLong:"No_COW" fsAttrKern:"FS_NOCOW_FL" json:"no_cow" toml:"NoCOW" yaml:"Disable COW" xml:"noCOW,attr"`
|
||||||
UseParentProjId bool
|
DAX bool `fsAttrShort:"x" fsAttrLong:"DAX" fsAttrKern:"FS_DAX_FL" json:"dax" toml:"DAX" yaml:"DAX" xml:"DAX,attr"`
|
||||||
ReservedExt2 bool
|
ReservedExt4c bool `fsAttrShort:"N" fsAttrLong:"Inline_Data" fsAttrKern:"FS_INLINE_DATA_FL" json:"res_ext4c" toml:"ReservedExt4C" yaml:"Reserved Ext4 C" xml:"resExt4c,attr"`
|
||||||
|
UseParentProjId bool `fsAttrShort:"P" fsAttrLong:"Project_Hierarchy" fsAttrKern:"FS_PROJINHERIT_FL" json:"parent_proj_id" toml:"ParentProjId" yaml:"Use Parent Project ID" xml:"parentProjId,attr"`
|
||||||
|
CaseInsensitive bool `fsAttrShort:"F" fsAttrLong:"Casefold" fsAttrKern:"FS_CASEFOLD_FL" json:"case_ins" toml:"CaseInsensitive" yaml:"Case Insensitive" xml:"caseIns,attr"`
|
||||||
|
ReservedExt2 bool `fsAttrShort:"-" fsAttrLong:"-" fsAttrKern:"FS_RESERVED_FL" json:"res_ext2" toml:"ReservedExt2" yaml:"Reserved Ext2" xml:"resExt2,attr"`
|
||||||
}
|
}
|
||||||
|
163
funcs_idstate.go
Normal file
163
funcs_idstate.go
Normal file
@ -0,0 +1,163 @@
|
|||||||
|
package sysutils
|
||||||
|
|
||||||
|
// Checked consolidates all the provided checked functions.
|
||||||
|
func (i *IDState) Checked() (checked bool) {
|
||||||
|
|
||||||
|
if i == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
checked = i.uidsChecked &&
|
||||||
|
i.gidsChecked &&
|
||||||
|
i.sudoChecked &&
|
||||||
|
i.ppidUidChecked &&
|
||||||
|
i.ppidGidChecked
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
IsReal consolidates all the elevation/dropped-privs checks into a single method.
|
||||||
|
|
||||||
|
It will only return true if no sudo was detected and *all* UIDs/GIDs match.
|
||||||
|
*/
|
||||||
|
func (i *IDState) IsReal(real bool) {
|
||||||
|
|
||||||
|
if i == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
real = true
|
||||||
|
|
||||||
|
for _, b := range []bool{
|
||||||
|
i.IsSuid(),
|
||||||
|
i.IsSgid(),
|
||||||
|
i.IsSudoUser(),
|
||||||
|
i.IsSudoGroup(),
|
||||||
|
} {
|
||||||
|
if b {
|
||||||
|
real = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
IsSudoGroup is true if any of the group sudo env vars are set,
|
||||||
|
or the parent process has a different group (and is not PID 1).
|
||||||
|
|
||||||
|
It will always return false if SudoChecked returns false oor PPIDGIDsChecked returns false.
|
||||||
|
*/
|
||||||
|
func (i *IDState) IsSudoGroup() (sudo bool) {
|
||||||
|
|
||||||
|
if i == nil || !i.sudoChecked || !i.ppidGidChecked {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
sudo = i.SudoEnvGroup || !i.PPIDGidMatch
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
IsSudoUser is true if any of the user sudo env vars are set,
|
||||||
|
or the parent process has a different owner (and is not PID 1).
|
||||||
|
|
||||||
|
It will always return false if SudoChecked returns false or PPIDUIDsChecked returns false.
|
||||||
|
*/
|
||||||
|
func (i *IDState) IsSudoUser() (sudo bool) {
|
||||||
|
|
||||||
|
if i == nil || !i.sudoChecked || !i.ppidUidChecked {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
sudo = i.SudoEnvUser || !i.PPIDUidMatch
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsSuid is true if the RUID does not match EUID or SUID. It will always return false if UIDsChecked returns false.
|
||||||
|
func (i *IDState) IsSuid() (suid bool) {
|
||||||
|
|
||||||
|
if i == nil || !i.uidsChecked {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
suid = i.RUID != i.EUID || i.RUID != i.SUID
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsSgid is true if the RGID does not match EGID or SGID. It will always return false if GIDsChecked returns false.
|
||||||
|
func (i *IDState) IsSgid() (sgid bool) {
|
||||||
|
|
||||||
|
if i == nil || !i.gidsChecked {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
sgid = i.RGID != i.EGID || i.RGID != i.SGID
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// GIDsChecked is true if the GIDs presented can be trusted.
|
||||||
|
func (i *IDState) GIDsChecked() (checked bool) {
|
||||||
|
|
||||||
|
if i == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
checked = i.gidsChecked
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// PPIDGIDsChecked is true if PPIDGidMatch can be trusted.
|
||||||
|
func (i *IDState) PPIDGIDsChecked() (checked bool) {
|
||||||
|
|
||||||
|
if i == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
checked = i.ppidGidChecked
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// PPIDUIDsChecked is true if PPIDUidMatch can be trusted.
|
||||||
|
func (i *IDState) PPIDUIDsChecked() (checked bool) {
|
||||||
|
|
||||||
|
if i == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
checked = i.ppidUidChecked
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// SudoChecked is true if SudoEnvVars can be trusted
|
||||||
|
func (i *IDState) SudoChecked() (checked bool) {
|
||||||
|
|
||||||
|
if i == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
checked = i.sudoChecked
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// UIDsChecked is true if the UIDs presented can be trusted.
|
||||||
|
func (i *IDState) UIDsChecked() (checked bool) {
|
||||||
|
|
||||||
|
if i == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
checked = i.uidsChecked
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
50
funcs_linux.go
Normal file
50
funcs_linux.go
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
package sysutils
|
||||||
|
|
||||||
|
import (
|
||||||
|
`fmt`
|
||||||
|
`os`
|
||||||
|
|
||||||
|
`golang.org/x/sys/unix`
|
||||||
|
`r00t2.io/sysutils/envs`
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetIDState returns current ID/elevation information. An IDState should *not* be explicitly created/defined.
|
||||||
|
func GetIDState() (ids IDState) {
|
||||||
|
|
||||||
|
var err error
|
||||||
|
|
||||||
|
ids.RUID, ids.EUID, ids.SUID = unix.Getresuid()
|
||||||
|
ids.uidsChecked = true
|
||||||
|
ids.RGID, ids.EGID, ids.SGID = unix.Getresgid()
|
||||||
|
ids.gidsChecked = true
|
||||||
|
|
||||||
|
ids.SudoEnvCmd = envs.HasEnv("SUDO_COMMAND")
|
||||||
|
ids.SudoEnvHome = envs.HasEnv("SUDO_HOME")
|
||||||
|
ids.SudoEnvGroup = envs.HasEnv("SUDO_GID")
|
||||||
|
ids.SudoEnvUser = envs.HasEnv("SUDO_UID") || envs.HasEnv("SUDO_USER")
|
||||||
|
if ids.SudoEnvCmd || ids.SudoEnvHome || ids.SudoEnvGroup || ids.SudoEnvUser {
|
||||||
|
ids.SudoEnvVars = true
|
||||||
|
}
|
||||||
|
ids.sudoChecked = true
|
||||||
|
|
||||||
|
// PID 1 will *always* be root, so that can return a false positive for sudo.
|
||||||
|
if os.Getppid() != 1 {
|
||||||
|
ids.stat = new(unix.Stat_t)
|
||||||
|
if err = unix.Stat(
|
||||||
|
fmt.Sprintf("/proc/%d/stat", os.Getppid()),
|
||||||
|
ids.stat,
|
||||||
|
); err != nil {
|
||||||
|
err = nil
|
||||||
|
} else {
|
||||||
|
ids.PPIDUidMatch = ids.RUID == int(ids.stat.Uid)
|
||||||
|
ids.ppidUidChecked = true
|
||||||
|
ids.PPIDGidMatch = ids.RGID == int(ids.stat.Gid)
|
||||||
|
ids.ppidGidChecked = true
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ids.ppidUidChecked = true
|
||||||
|
ids.ppidGidChecked = true
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
28
go.mod
28
go.mod
@ -1,26 +1,24 @@
|
|||||||
module r00t2.io/sysutils
|
module r00t2.io/sysutils
|
||||||
|
|
||||||
go 1.21
|
go 1.24.5
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/davecgh/go-spew v1.1.1
|
github.com/davecgh/go-spew v1.1.1
|
||||||
github.com/g0rbe/go-chattr v1.0.1
|
github.com/djherbis/times v1.6.0
|
||||||
github.com/go-playground/validator/v10 v10.22.0
|
|
||||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
|
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
|
||||||
golang.org/x/sys v0.19.0
|
github.com/shirou/gopsutil/v4 v4.25.7
|
||||||
|
golang.org/x/sync v0.16.0
|
||||||
|
golang.org/x/sys v0.35.0
|
||||||
honnef.co/go/augeas v0.0.0-20161110001225-ca62e35ed6b8
|
honnef.co/go/augeas v0.0.0-20161110001225-ca62e35ed6b8
|
||||||
r00t2.io/goutils v1.6.0
|
r00t2.io/goutils v1.9.6
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
|
github.com/ebitengine/purego v0.8.4 // indirect
|
||||||
github.com/go-playground/locales v0.14.1 // indirect
|
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54 // indirect
|
||||||
github.com/leodido/go-urn v1.4.0 // indirect
|
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
|
||||||
golang.org/x/crypto v0.19.0 // indirect
|
github.com/tklauser/go-sysconf v0.3.15 // indirect
|
||||||
golang.org/x/net v0.21.0 // indirect
|
github.com/tklauser/numcpus v0.10.0 // indirect
|
||||||
golang.org/x/text v0.14.0 // indirect
|
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||||
)
|
)
|
||||||
|
|
||||||
// Pending https://github.com/g0rbe/go-chattr/pull/3
|
|
||||||
replace github.com/g0rbe/go-chattr => github.com/johnnybubonic/go-chattr v0.0.0-20240126141003-459f46177b13
|
|
||||||
|
76
go.sum
76
go.sum
@ -1,40 +1,56 @@
|
|||||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
|
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
|
||||||
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
|
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
|
||||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
|
||||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||||
github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao=
|
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||||
github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
|
||||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 h1:PpXWgLPs+Fqr325bN2FD2ISlRRztXibcX6e8f5FR5Dc=
|
||||||
github.com/johnnybubonic/go-chattr v0.0.0-20240126141003-459f46177b13 h1:tgEbuE4bNVjaCWWIB1u9lDzGqH/ZdBTg33+4vNW2rjg=
|
github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
||||||
github.com/johnnybubonic/go-chattr v0.0.0-20240126141003-459f46177b13/go.mod h1:yQc6VPJfpDDC1g+W2t47+yYmzBNioax/GLiyJ25/IOs=
|
github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54 h1:mFWunSatvkQQDhpdyuFAYwyAan3hzCuma+Pz8sqvOfg=
|
||||||
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
||||||
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||||
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
|
github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs=
|
||||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c=
|
||||||
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
|
github.com/shirou/gopsutil/v4 v4.25.7 h1:bNb2JuqKuAu3tRlPv5piSmBZyMfecwQ+t/ILq+1JqVM=
|
||||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
github.com/shirou/gopsutil/v4 v4.25.7/go.mod h1:XV/egmwJtd3ZQjBpJVY5kndsiOO4IRqy9TQnmm6VP7U=
|
||||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||||
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
|
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4=
|
||||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
|
||||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
|
||||||
|
github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ=
|
||||||
|
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||||
|
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||||
|
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||||
|
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||||
|
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
||||||
|
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||||
|
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||||
|
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
honnef.co/go/augeas v0.0.0-20161110001225-ca62e35ed6b8 h1:FW42yWB1sGClqswyHIB68wo0+oPrav1IuQ+Tdy8Qp8E=
|
honnef.co/go/augeas v0.0.0-20161110001225-ca62e35ed6b8 h1:FW42yWB1sGClqswyHIB68wo0+oPrav1IuQ+Tdy8Qp8E=
|
||||||
honnef.co/go/augeas v0.0.0-20161110001225-ca62e35ed6b8/go.mod h1:44w9OfBSQ9l3o59rc2w3AnABtE44bmtNnRMNC7z+oKE=
|
honnef.co/go/augeas v0.0.0-20161110001225-ca62e35ed6b8/go.mod h1:44w9OfBSQ9l3o59rc2w3AnABtE44bmtNnRMNC7z+oKE=
|
||||||
r00t2.io/goutils v1.6.0 h1:oBC6PgBv0y/fdHeCmWgORHpBiU8uWw7IfFQJX5rIuzY=
|
r00t2.io/goutils v1.9.2 h1:1rcDgJ3MorWVBmZSvLpbAUNC+J+ctRfJQq5Wliucjww=
|
||||||
r00t2.io/goutils v1.6.0/go.mod h1:9ObJI9S71wDLTOahwoOPs19DhZVYrOh4LEHmQ8SW4Lk=
|
r00t2.io/goutils v1.9.2/go.mod h1:76AxpXUeL10uFklxRB11kQsrtj2AKiNm8AwG1bNoBCA=
|
||||||
r00t2.io/sysutils v1.1.1/go.mod h1:Wlfi1rrJpoKBOjWiYM9rw2FaiZqraD6VpXyiHgoDo/o=
|
r00t2.io/goutils v1.9.3 h1:pR9Ggu5JBpVjfrqNBrZg9bZpKan0TCcwt3MXrSdkhLo=
|
||||||
|
r00t2.io/goutils v1.9.3/go.mod h1:76AxpXUeL10uFklxRB11kQsrtj2AKiNm8AwG1bNoBCA=
|
||||||
|
r00t2.io/goutils v1.9.4 h1:+Bm72mKhgXs6DRtU3P4sBjqUNwAKAFfdF9lx5bomwQY=
|
||||||
|
r00t2.io/goutils v1.9.4/go.mod h1:76AxpXUeL10uFklxRB11kQsrtj2AKiNm8AwG1bNoBCA=
|
||||||
|
r00t2.io/goutils v1.9.5 h1:tIBtXKbGPLCkdhHZSESdTZ2QzC1e+8jDToNr/BauWe0=
|
||||||
|
r00t2.io/goutils v1.9.5/go.mod h1:76AxpXUeL10uFklxRB11kQsrtj2AKiNm8AwG1bNoBCA=
|
||||||
|
r00t2.io/goutils v1.9.6/go.mod h1:76AxpXUeL10uFklxRB11kQsrtj2AKiNm8AwG1bNoBCA=
|
||||||
|
14
ispriv/consts_nix.go
Normal file
14
ispriv/consts_nix.go
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ispriv
|
||||||
|
|
||||||
|
const (
|
||||||
|
sudoEnvPfx string = "SUDO_"
|
||||||
|
sudoUidEnv string = sudoEnvPfx + "UID"
|
||||||
|
sudoGidEnv string = sudoEnvPfx + "GID"
|
||||||
|
sudoUnameEnv string = sudoEnvPfx + "USER"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
curLoginUidFile string = "/proc/self/loginuid"
|
||||||
|
)
|
7
ispriv/doc_nix.go
Normal file
7
ispriv/doc_nix.go
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
//go:build unix
|
||||||
|
|
||||||
|
/*
|
||||||
|
ispriv provides functions and a method to determine if a process is being run SUID/SGID, under sudo, etc.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ispriv
|
7
ispriv/doc_windows.go
Normal file
7
ispriv/doc_windows.go
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
|
/*
|
||||||
|
ispriv provides functions on Windows to determine the currentl privilege status.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ispriv
|
68
ispriv/funcs_nix.go
Normal file
68
ispriv/funcs_nix.go
Normal file
@ -0,0 +1,68 @@
|
|||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ispriv
|
||||||
|
|
||||||
|
import (
|
||||||
|
`os`
|
||||||
|
|
||||||
|
`github.com/shirou/gopsutil/v4/process`
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
GetProcIDs returns a ProcIDs from a given PID. An error will be raised if the process ID doesn't exist.
|
||||||
|
A negative value indicates "self" (see also GetProcIDsSelf).
|
||||||
|
|
||||||
|
Note that if you are not EUID == 0 (root) or you/the sudo target user does not own the process,
|
||||||
|
the returning ProcIDs is HIGHLY LIKELY to be very inaccurate.
|
||||||
|
*/
|
||||||
|
func GetProcIDs(pid int32) (p *ProcIDs, err error) {
|
||||||
|
|
||||||
|
var proc ProcIDs
|
||||||
|
var ids []uint32
|
||||||
|
|
||||||
|
if pid < 0 {
|
||||||
|
pid = int32(os.Getpid())
|
||||||
|
}
|
||||||
|
|
||||||
|
if proc.proc, err = process.NewProcess(pid); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if ids, err = proc.proc.Gids(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
p.gids = &IdInfo{
|
||||||
|
real: uint(ids[0]),
|
||||||
|
effective: uint(ids[1]),
|
||||||
|
savedSet: uint(ids[2]),
|
||||||
|
filesystem: nil,
|
||||||
|
}
|
||||||
|
if len(ids) == 4 {
|
||||||
|
p.gids.filesystem = new(uint)
|
||||||
|
*p.gids.filesystem = uint(ids[3])
|
||||||
|
}
|
||||||
|
if ids, err = proc.proc.Uids(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
p.uids = &IdInfo{
|
||||||
|
real: uint(ids[0]),
|
||||||
|
effective: uint(ids[1]),
|
||||||
|
savedSet: uint(ids[2]),
|
||||||
|
filesystem: nil,
|
||||||
|
}
|
||||||
|
if len(ids) == 4 {
|
||||||
|
p.uids.filesystem = new(uint)
|
||||||
|
*p.uids.filesystem = uint(ids[3])
|
||||||
|
}
|
||||||
|
|
||||||
|
p = &proc
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetProcIDsSelf returns a ProcIDs from the current process' PID.
|
||||||
|
func GetProcIDsSelf() (p *ProcIDs, err error) {
|
||||||
|
|
||||||
|
p, err = GetProcIDs(int32(os.Getpid()))
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
426
ispriv/funcs_procids_nix.go
Normal file
426
ispriv/funcs_procids_nix.go
Normal file
@ -0,0 +1,426 @@
|
|||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ispriv
|
||||||
|
|
||||||
|
import (
|
||||||
|
`errors`
|
||||||
|
`os`
|
||||||
|
`os/user`
|
||||||
|
`strconv`
|
||||||
|
`strings`
|
||||||
|
|
||||||
|
`github.com/shirou/gopsutil/v4/process`
|
||||||
|
`golang.org/x/sys/unix`
|
||||||
|
`r00t2.io/sysutils/envs`
|
||||||
|
`r00t2.io/sysutils/paths`
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetEffective returns the EUID/EGID.
|
||||||
|
func (p *ProcIDs) GetEffective() (euid, egid uint) {
|
||||||
|
|
||||||
|
euid = p.uids.effective
|
||||||
|
egid = p.gids.effective
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFS returns the FSUID/FSGID. Not all platforms have this, in which case they'll be nil.
|
||||||
|
func (p *ProcIDs) GetFS() (fsuid, fsgid *uint) {
|
||||||
|
|
||||||
|
if p.uids.filesystem != nil {
|
||||||
|
fsuid = new(uint)
|
||||||
|
*fsuid = *p.uids.filesystem
|
||||||
|
}
|
||||||
|
if p.gids.filesystem != nil {
|
||||||
|
fsgid = new(uint)
|
||||||
|
*fsgid = *p.gids.filesystem
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
GetGids returms a set of a ProcIDs GIDs.
|
||||||
|
fs will be nil if unsupported on the platform.
|
||||||
|
If invoked with SGID, "savedSet" will be the SGID GID.
|
||||||
|
*/
|
||||||
|
func (p *ProcIDs) GetGids() (real, effective, savedSet uint, fs *uint) {
|
||||||
|
|
||||||
|
real = p.gids.real
|
||||||
|
effective = p.gids.effective
|
||||||
|
savedSet = p.gids.savedSet
|
||||||
|
if p.gids.filesystem != nil {
|
||||||
|
fs = new(uint)
|
||||||
|
*fs = *p.gids.filesystem
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetReal returns the (R)UID/(R)GID.
|
||||||
|
func (p *ProcIDs) GetReal() (ruid, rgid uint) {
|
||||||
|
|
||||||
|
ruid = p.uids.real
|
||||||
|
rgid = p.gids.real
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSaved returns the SUID/SGID.
|
||||||
|
func (p *ProcIDs) GetSaved() (suid, sgid uint) {
|
||||||
|
|
||||||
|
suid = p.uids.savedSet
|
||||||
|
sgid = p.gids.savedSet
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
GetUids returms a set of a ProcIDs UIDs.
|
||||||
|
fs will be nil if unsupported on the platform.
|
||||||
|
If invoked with SUID, "savedSet" will be the SUID UID.
|
||||||
|
*/
|
||||||
|
func (p *ProcIDs) GetUids() (real, effective, savedSet uint, fs *uint) {
|
||||||
|
|
||||||
|
real = p.uids.real
|
||||||
|
effective = p.uids.effective
|
||||||
|
savedSet = p.uids.savedSet
|
||||||
|
if p.uids.filesystem != nil {
|
||||||
|
fs = new(uint)
|
||||||
|
*fs = *p.uids.filesystem
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
IsSGID returns true if the process is Set GID/SGID.
|
||||||
|
|
||||||
|
Note that it will return false if invoked by a group with the same GID as an SGID that's set.
|
||||||
|
*/
|
||||||
|
func (p *ProcIDs) IsSGID() (isSgid bool) {
|
||||||
|
|
||||||
|
isSgid = p.gids.real != p.gids.savedSet
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
IsSUID returns true if the process is Set UID/SUID.
|
||||||
|
|
||||||
|
Note that it will return false if invoked by a user with the same UID as an SUID that's set.
|
||||||
|
*/
|
||||||
|
func (p *ProcIDs) IsSUID() (isSuid bool) {
|
||||||
|
|
||||||
|
isSuid = p.uids.real != p.uids.savedSet
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
IsSudo does a very fast (and potentially inaccurate) evaluation of whether the process is running under sudo.
|
||||||
|
|
||||||
|
DO NOT use this function for security-sensitive uses, fully accurate results, or critical implementations!
|
||||||
|
Use IsSudoWithConfidence instead for those cases.
|
||||||
|
IsSudo only does the most basic of checking, which can be easily and completely overridden by a non-privileged user.
|
||||||
|
*/
|
||||||
|
func (p *ProcIDs) IsSudo() (isSudo bool) {
|
||||||
|
|
||||||
|
// This is how every other Joe Blow does this. It's an extremely dumb way to do it. The caller has been warned.
|
||||||
|
for k, _ := range envs.GetEnvMap() {
|
||||||
|
if strings.HasPrefix(k, sudoEnvPfx) {
|
||||||
|
isSudo = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
IsSudoDetailed returns true for a very fast evaluation of whether the process is running under sudo,
|
||||||
|
and information about that context.
|
||||||
|
(If isSudo is false, originalUid/originalGid will both be -1 and originalUser will be nil.)
|
||||||
|
|
||||||
|
DO NOT use this function for security-sensitive uses, fully accurate results, or critical implementations!
|
||||||
|
Use IsSudoWithConfidenceDetailed instead for those cases.
|
||||||
|
IsSudoDetailed only does the most basic of checking, which can be easily and completely overridden by a non-privileged user.
|
||||||
|
*/
|
||||||
|
func (p *ProcIDs) IsSudoDetailed() (isSudo bool, originalUid, originalGid int, originalUser *user.User, err error) {
|
||||||
|
|
||||||
|
if originalUid, originalGid, originalUser, err = p.getSudoInfoEnv(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if originalUid >= 0 || originalGid >= 0 || originalUser != nil {
|
||||||
|
isSudo = true
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
IsSudoWithConfidence is like IsSudo, but is *much* more throrough.
|
||||||
|
|
||||||
|
It not only returns isSudo, which is true if *any* indicators pass,
|
||||||
|
but also:
|
||||||
|
|
||||||
|
* a confidence value (which indicates *how many* indicators *passed*)
|
||||||
|
* a maxConfidence value (which indicates how many indicators were *tested*)
|
||||||
|
* a score value (which is a float indicating overall confidence on a fixed and weighted scale; higher is more confident, 1.0 indicates 100% confidence)
|
||||||
|
*/
|
||||||
|
func (p *ProcIDs) IsSudoWithConfidence() (isSudo bool, confidence, maxConfidence uint, score float64, err error) {
|
||||||
|
|
||||||
|
// confidence/maxConfidence are not used directly; they're unweighted counters.
|
||||||
|
var scoreConf uint
|
||||||
|
var scoreMaxConf uint
|
||||||
|
|
||||||
|
score = float64(scoreConf) / float64(scoreMaxConf)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
IsSudoWithConfidenceDetailed is like IsSudoDetailed, but is *much* more throrough.
|
||||||
|
|
||||||
|
It not only returns the same results as IsSudoDetailed, but includes the same scoring values/system as IsSudoWithConfidence.
|
||||||
|
*/
|
||||||
|
func (p *ProcIDs) IsSudoWithConfidenceDetailed() (isSudo bool, confidence, maxConfidence uint, score float64, originalUid, originalGid int, originalUser *user.User, err error) {
|
||||||
|
|
||||||
|
var b []byte
|
||||||
|
var ok bool
|
||||||
|
var permErr bool
|
||||||
|
var envUid int
|
||||||
|
var envGid int
|
||||||
|
var scoreConf uint
|
||||||
|
var scoreMaxConf uint
|
||||||
|
var curUser *user.User
|
||||||
|
var envUser *user.User
|
||||||
|
var curUid uint64
|
||||||
|
var fstat unix.Stat_t
|
||||||
|
var fsUid int
|
||||||
|
var procFiles []process.OpenFilesStat
|
||||||
|
var loginUidFile string = curLoginUidFile
|
||||||
|
|
||||||
|
if curUser, err = user.Current(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if curUid, err = strconv.ParseUint(curUser.Uid, 10, 32); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if procFiles, err = p.proc.OpenFiles(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Env vars; only score 1x/each.
|
||||||
|
maxConfidence += 3
|
||||||
|
scoreMaxConf += 3
|
||||||
|
if envUid, envGid, envUser, err = p.getSudoInfoEnv(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
originalUid, originalGid, originalUser = envUid, envGid, envUser
|
||||||
|
if envUid >= 0 {
|
||||||
|
confidence++
|
||||||
|
scoreConf++
|
||||||
|
}
|
||||||
|
if envGid >= 0 {
|
||||||
|
confidence++
|
||||||
|
scoreConf++
|
||||||
|
}
|
||||||
|
if envUser != nil {
|
||||||
|
confidence++
|
||||||
|
scoreConf++
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
TTY/PTY ownership. We (can) only check this if we're running in an interactive session.
|
||||||
|
|
||||||
|
Typically this is done via (golang.org/x/term).IsTerminal(),
|
||||||
|
That pulls in a bunch of stuff I don't need, though, so I'll just replicate (...).IsTerminal() here;
|
||||||
|
it's just a wrapped single function call.
|
||||||
|
*/
|
||||||
|
// procFiles[0] is always STDIN. Whether it's a pipe, or TTY/PTY, or file, etc.
|
||||||
|
// (likewise, procFiles[1] is always STDOUT, procFiles[2] is always STDERR); however...
|
||||||
|
if _, err = unix.IoctlGetTermios(int(procFiles[0].Fd), unix.TCGETS); err == nil {
|
||||||
|
// Interactive
|
||||||
|
maxConfidence++
|
||||||
|
// This is only worth 2. It's pretty hard to fake unless origin user is root,
|
||||||
|
// but it's ALSO usually set to the target user.
|
||||||
|
scoreMaxConf += 2
|
||||||
|
fstat = unix.Stat_t{}
|
||||||
|
if err = unix.Fstat(int(procFiles[0].Fd), &fstat); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if uint64(fstat.Uid) != curUid {
|
||||||
|
// This is a... *potential* indicator, if a lateral sudo was done (user1 => user2),
|
||||||
|
// or root used sudo to *drop* privs to a regular user.
|
||||||
|
// We mark it as a pass for confidence since it IS a terminal, and it's permission-related.
|
||||||
|
confidence++
|
||||||
|
scoreConf += 2
|
||||||
|
originalUid = int(fstat.Uid)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// err is OK; just means non-interactive. No counter or score/max score increase; basically a NO-OP.
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// /proc/self/loginuid
|
||||||
|
// This is a REALLY good indicator. Probably the strongest next to reverse-walking the proc tree. It depends on PAM and auditd support, I think,
|
||||||
|
// BUT if it's present it's *really* really strong.
|
||||||
|
if ok, err = paths.RealPathExists(&loginUidFile); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if ok {
|
||||||
|
maxConfidence++
|
||||||
|
scoreMaxConf += 5
|
||||||
|
if b, err = os.ReadFile(loginUidFile); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if fsUid, err = strconv.Atoi(strings.TrimSpace(string(b))); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if uint64(fsUid) != curUid {
|
||||||
|
confidence++
|
||||||
|
scoreConf += 5
|
||||||
|
originalUid = fsUid
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// proc tree reverse walking.
|
||||||
|
// This is, by far, the most reliable method.
|
||||||
|
// There are some valid conditions in which this would fail due to permissions
|
||||||
|
// (e.g. lateral sudo: user1 => user2), but if it's a permission error it's *probably*
|
||||||
|
// a lateral move anyways.
|
||||||
|
if isSudo, permErr, originalUid, originalGid, originalUser, err = p.revProcWalk(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
maxConfidence++
|
||||||
|
scoreMaxConf += 10
|
||||||
|
if permErr {
|
||||||
|
confidence++
|
||||||
|
scoreConf += 5
|
||||||
|
} else if isSudo {
|
||||||
|
confidence++
|
||||||
|
scoreConf += 10
|
||||||
|
}
|
||||||
|
|
||||||
|
score = float64(scoreConf) / float64(scoreMaxConf)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
getSudoInfoEnv returns env var driven sudo information.
|
||||||
|
|
||||||
|
These are in no way guaranteed to be accurate as the user can remove or override them.
|
||||||
|
*/
|
||||||
|
func (p *ProcIDs) getSudoInfoEnv() (uid, gid int, u *user.User, err error) {
|
||||||
|
|
||||||
|
var ok bool
|
||||||
|
var val string
|
||||||
|
var envMap map[string]string = envs.GetEnvMap()
|
||||||
|
|
||||||
|
uid = -1
|
||||||
|
gid = -1
|
||||||
|
|
||||||
|
if val, ok = envMap[sudoUnameEnv]; ok {
|
||||||
|
if u, err = user.Lookup(val); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if val, ok = envMap[sudoUidEnv]; ok {
|
||||||
|
if uid, err = strconv.Atoi(val); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if val, ok = envMap[sudoGidEnv]; ok {
|
||||||
|
if gid, err = strconv.Atoi(val); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
revProcWalk walks up the process tree ("proctree") until it either:
|
||||||
|
|
||||||
|
* finds a process invoked with sudo (true)
|
||||||
|
* hits PID == 1 (false)
|
||||||
|
* hits a permission error (true-ish)
|
||||||
|
*/
|
||||||
|
func (p *ProcIDs) revProcWalk() (sudoFound, isPermErr bool, origUid, origGid int, origUser *user.User, err error) {
|
||||||
|
|
||||||
|
var cmd []string
|
||||||
|
var parent *ProcIDs
|
||||||
|
var parentPid int32
|
||||||
|
var parentUname string
|
||||||
|
var parentUids []uint32
|
||||||
|
var parentGids []uint32
|
||||||
|
|
||||||
|
origUid = -1
|
||||||
|
origGid = -1
|
||||||
|
|
||||||
|
parent = p
|
||||||
|
for {
|
||||||
|
if parent == nil || parent.proc.Pid == 1 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if cmd, err = parent.proc.CmdlineSlice(); err != nil {
|
||||||
|
if errors.Is(err, os.ErrPermission) {
|
||||||
|
isPermErr = true
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if cmd[0] == "sudo" {
|
||||||
|
sudoFound = true
|
||||||
|
if parentUname, err = parent.proc.Username(); err != nil {
|
||||||
|
if errors.Is(err, os.ErrPermission) {
|
||||||
|
isPermErr = true
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if parentUids, err = parent.proc.Uids(); err != nil {
|
||||||
|
if errors.Is(err, os.ErrPermission) {
|
||||||
|
isPermErr = true
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if parentGids, err = parent.proc.Gids(); err != nil {
|
||||||
|
if errors.Is(err, os.ErrPermission) {
|
||||||
|
isPermErr = true
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if origUser, err = user.Lookup(parentUname); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
origUid = int(parentUids[0])
|
||||||
|
origGid = int(parentGids[0])
|
||||||
|
}
|
||||||
|
if sudoFound {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if parentPid, err = parent.proc.Ppid(); err != nil {
|
||||||
|
if errors.Is(err, os.ErrPermission) {
|
||||||
|
isPermErr = true
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if parent, err = GetProcIDs(parentPid); err != nil {
|
||||||
|
if errors.Is(err, os.ErrPermission) {
|
||||||
|
isPermErr = true
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
60
ispriv/funcs_windows.go
Normal file
60
ispriv/funcs_windows.go
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
|
package ispriv
|
||||||
|
|
||||||
|
import (
|
||||||
|
`golang.org/x/sys/windows`
|
||||||
|
)
|
||||||
|
|
||||||
|
// IsAdmin returns true if currently running with Administrator privileges.
|
||||||
|
func IsAdmin() (admin bool, err error) {
|
||||||
|
|
||||||
|
var sid *windows.SID
|
||||||
|
var tok windows.Token
|
||||||
|
|
||||||
|
if err = windows.AllocateAndInitializeSid(
|
||||||
|
&windows.SECURITY_NT_AUTHORITY, // identAuth
|
||||||
|
2, // subAuth
|
||||||
|
windows.SECURITY_BUILTIN_DOMAIN_RID, // subAuth0
|
||||||
|
windows.DOMAIN_ALIAS_RID_ADMINS, // subAuth1
|
||||||
|
0, 0, 0, 0, 0, 0, // subAuth2-10
|
||||||
|
&sid, // sid
|
||||||
|
); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer windows.FreeSid(sid)
|
||||||
|
|
||||||
|
tok = windows.Token(0)
|
||||||
|
if admin, err = tok.IsMember(sid); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsElevated returns true if running in an elevated ("Run as Administrator") context.
|
||||||
|
func IsElevated() (elevated bool) {
|
||||||
|
|
||||||
|
var tok windows.Token = windows.Token(0)
|
||||||
|
|
||||||
|
elevated = tok.IsElevated()
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
IsPrivileged indicates that the current security context is running both
|
||||||
|
with Administrator priviliges AND is elevated.
|
||||||
|
*/
|
||||||
|
func IsPrivileged() (privileged bool, err error) {
|
||||||
|
|
||||||
|
if privileged, err = IsAdmin(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if privileged {
|
||||||
|
privileged = IsElevated()
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
19
ispriv/types_nix.go
Normal file
19
ispriv/types_nix.go
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ispriv
|
||||||
|
|
||||||
|
import (
|
||||||
|
`github.com/shirou/gopsutil/v4/process`
|
||||||
|
)
|
||||||
|
|
||||||
|
type ProcIDs struct {
|
||||||
|
proc *process.Process
|
||||||
|
uids *IdInfo
|
||||||
|
gids *IdInfo
|
||||||
|
}
|
||||||
|
type IdInfo struct {
|
||||||
|
real uint
|
||||||
|
effective uint
|
||||||
|
savedSet uint
|
||||||
|
filesystem *uint
|
||||||
|
}
|
1
paths/TODO
Normal file
1
paths/TODO
Normal file
@ -0,0 +1 @@
|
|||||||
|
- search criteria should *also* support a timestamp range (e.g. so a search can be restricted to both older than AND newer than; e.g. older than 00:00, newer than 01:00)
|
46
paths/consts.go
Normal file
46
paths/consts.go
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
package paths
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/fs"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
GenericSeparator rune = '/'
|
||||||
|
)
|
||||||
|
|
||||||
|
// Mostly just for reference.
|
||||||
|
const (
|
||||||
|
// ModeDir | ModeSymlink | ModeNamedPipe | ModeSocket | ModeDevice | ModeCharDevice | ModeIrregular
|
||||||
|
modeDir pathMode = pathMode(fs.ModeDir)
|
||||||
|
modeSymlink pathMode = pathMode(fs.ModeSymlink)
|
||||||
|
modePipe pathMode = pathMode(fs.ModeNamedPipe)
|
||||||
|
modeSocket pathMode = pathMode(fs.ModeSocket)
|
||||||
|
modeDev pathMode = pathMode(fs.ModeDevice)
|
||||||
|
modeCharDev pathMode = pathMode(fs.ModeCharDevice)
|
||||||
|
modeIrregular pathMode = pathMode(fs.ModeIrregular)
|
||||||
|
modeAnyExceptRegular pathMode = modeDir | modeSymlink | modePipe | modeSocket | modeDev | modeCharDev | modeIrregular
|
||||||
|
)
|
||||||
|
|
||||||
|
// Miss reasons
|
||||||
|
const (
|
||||||
|
MissNoMiss missReason = ""
|
||||||
|
MissNoMeta missReason = "Could not determine metadata"
|
||||||
|
MissBadBase missReason = "Base name does not match BasePtrn"
|
||||||
|
MissBadPath missReason = "Path does not match PathPtrn"
|
||||||
|
MissBadTime missReason = "Time(s) does not/do not match Age"
|
||||||
|
MissFile missReason = "Object is a file and NoFiles is set"
|
||||||
|
MissType missReason = "Object does not match TargetType"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Times
|
||||||
|
const TimeAny pathTimeType = 0
|
||||||
|
const (
|
||||||
|
// TimeAccessed == atime
|
||||||
|
TimeAccessed pathTimeType = 1 << iota
|
||||||
|
// TimeCreated == "birth" time (*NOT* ctime! See TimeChanged)
|
||||||
|
TimeCreated
|
||||||
|
// TimeChanged == ctime
|
||||||
|
TimeChanged
|
||||||
|
// TimeModified == mtime
|
||||||
|
TimeModified
|
||||||
|
)
|
17
paths/consts_unix.go
Normal file
17
paths/consts_unix.go
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
//go:build !windows
|
||||||
|
|
||||||
|
package paths
|
||||||
|
|
||||||
|
const (
|
||||||
|
/*
|
||||||
|
MaxSymlinkLevel is hardcoded into the kernel for macOS, BSDs and Linux. It's unlikely to change.
|
||||||
|
Thankfully, it's the same on all of them.
|
||||||
|
|
||||||
|
On all, it's defined as MAXSYMLINKS in the following headers:
|
||||||
|
|
||||||
|
macOS (no, macOS is not a BSD; no, it is not FreeBSD; yes, I *will* fight you on it and win): sys/param.h
|
||||||
|
BSDs: sys/sys/param.h
|
||||||
|
Linux: include/linux/namei.h
|
||||||
|
*/
|
||||||
|
MaxSymlinkLevel uint = 40
|
||||||
|
)
|
15
paths/consts_windows.go
Normal file
15
paths/consts_windows.go
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
|
package paths
|
||||||
|
|
||||||
|
const (
|
||||||
|
/*
|
||||||
|
MaxSymLinkLevel on Windows is weird; Microsoft calls them "reparse points".
|
||||||
|
|
||||||
|
And it changes on the Windows version you're on, but it's been 63 past Windows Server 2003/Windows XP.
|
||||||
|
They're *very* EOL, so I'm completely ignoring them.
|
||||||
|
|
||||||
|
https://learn.microsoft.com/en-us/windows/win32/fileio/symbolic-link-programming-consideration
|
||||||
|
*/
|
||||||
|
MaxSymlinkLevel uint = 63
|
||||||
|
)
|
14
paths/errs.go
Normal file
14
paths/errs.go
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
package paths
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrMaxSymlinkLevel = fmt.Errorf("max symlink level met/exceeded")
|
||||||
|
ErrNilErrChan error = errors.New("an initialized error channel is required")
|
||||||
|
ErrNilMatchChan error = errors.New("an initialized matches channel is required")
|
||||||
|
ErrNilMismatchChan error = errors.New("an initialized mismatches channel is required")
|
||||||
|
ErrNilWg error = errors.New("a non-nil sync.WaitGroup is required")
|
||||||
|
)
|
653
paths/funcs.go
653
paths/funcs.go
@ -19,21 +19,31 @@
|
|||||||
package paths
|
package paths
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"os/user"
|
"os/user"
|
||||||
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
// "syscall"
|
// "syscall"
|
||||||
|
|
||||||
|
"github.com/djherbis/times"
|
||||||
|
"r00t2.io/goutils/bitmask"
|
||||||
)
|
)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
ExpandHome will take a tilde(~)-prefixed path and resolve it to the actual path in-place.
|
ExpandHome will take a tilde(~)-prefixed path and resolve it to the actual path in-place.
|
||||||
"Nested" user paths (~someuser/somechroot/~someotheruser) are not supported as home directories are expected to be absolute paths.
|
"Nested" user paths (~someuser/somechroot/~someotheruser) are not supported as home directories are expected to be absolute paths.
|
||||||
*/
|
*/
|
||||||
func ExpandHome(path *string) (err error) {
|
func ExpandHome(p *string) (err error) {
|
||||||
|
|
||||||
var unameSplit []string
|
var unameSplit []string
|
||||||
var uname string
|
var uname string
|
||||||
@ -42,10 +52,10 @@ func ExpandHome(path *string) (err error) {
|
|||||||
|
|
||||||
// Props to this guy.
|
// Props to this guy.
|
||||||
// https://stackoverflow.com/a/43578461/733214
|
// https://stackoverflow.com/a/43578461/733214
|
||||||
if len(*path) == 0 {
|
if len(*p) == 0 {
|
||||||
err = errors.New("empty path")
|
err = errors.New("empty path")
|
||||||
return
|
return
|
||||||
} else if (*path)[0] != '~' {
|
} else if (*p)[0] != '~' {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -61,7 +71,7 @@ func ExpandHome(path *string) (err error) {
|
|||||||
}
|
}
|
||||||
*/
|
*/
|
||||||
// K but do it smarter.
|
// K but do it smarter.
|
||||||
unameSplit = strings.SplitN(*path, string(os.PathSeparator), 2)
|
unameSplit = strings.SplitN(*p, string(os.PathSeparator), 2)
|
||||||
if len(unameSplit) != 2 {
|
if len(unameSplit) != 2 {
|
||||||
unameSplit = append(unameSplit, "")
|
unameSplit = append(unameSplit, "")
|
||||||
}
|
}
|
||||||
@ -77,58 +87,58 @@ func ExpandHome(path *string) (err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
*path = filepath.Join(u.HomeDir, unameSplit[1])
|
*p = filepath.Join(u.HomeDir, unameSplit[1])
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
GetFirst is the file equivalent of envs.GetFirst.
|
GetFirst is the file equivalent of envs.GetFirst.
|
||||||
|
|
||||||
It iterates through paths, normalizing them along the way
|
It iterates through paths, normalizing them along the way
|
||||||
(so abstracted paths such as ~/foo/bar.txt and relative paths
|
(so abstracted paths such as ~/foo/bar.txt and relative paths
|
||||||
such as bar/baz.txt will still work), and returns the content
|
such as bar/baz.txt will still work), and returns the content
|
||||||
of the first found existing file. If the first found path
|
of the first found existing file. If the first found path
|
||||||
is a directory, content will be nil but isDir will be true
|
is a directory, content will be nil but isDir will be true
|
||||||
(as will ok).
|
(as will ok).
|
||||||
|
|
||||||
If no path exists, ok will be false.
|
If no path exists, ok will be false.
|
||||||
|
|
||||||
As always, results are not guaranteed due to permissions, etc.
|
As always, results are not guaranteed due to permissions, etc.
|
||||||
potentially returning an inaccurate result.
|
potentially returning an inaccurate result.
|
||||||
|
|
||||||
This is a thin wrapper around GetFirstWithRef.
|
This is a thin wrapper around GetFirstWithRef.
|
||||||
*/
|
*/
|
||||||
func GetFirst(paths []string) (content []byte, isDir, ok bool) {
|
func GetFirst(p []string) (content []byte, isDir, ok bool) {
|
||||||
|
|
||||||
content, isDir, ok, _ = GetFirstWithRef(paths)
|
content, isDir, ok, _ = GetFirstWithRef(p)
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
GetFirstWithRef is the file equivalent of envs.GetFirstWithRef.
|
GetFirstWithRef is the file equivalent of envs.GetFirstWithRef.
|
||||||
|
|
||||||
It behaves exactly like GetFirst, but with an additional returned value, idx,
|
It behaves exactly like GetFirst, but with an additional returned value, idx,
|
||||||
which specifies the index in paths in which a path was found.
|
which specifies the index in p in which a path was found.
|
||||||
|
|
||||||
As always, results are not guaranteed due to permissions, etc.
|
As always, results are not guaranteed due to permissions, etc.
|
||||||
potentially returning an inaccurate result.
|
potentially returning an inaccurate result.
|
||||||
*/
|
*/
|
||||||
func GetFirstWithRef(paths []string) (content []byte, isDir, ok bool, idx int) {
|
func GetFirstWithRef(p []string) (content []byte, isDir, ok bool, idx int) {
|
||||||
|
|
||||||
var locPaths []string
|
var locPaths []string
|
||||||
var exists bool
|
var exists bool
|
||||||
var stat os.FileInfo
|
var stat fs.FileInfo
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
idx = -1
|
idx = -1
|
||||||
// We have to be a little less cavalier about this.
|
// We have to be a little less cavalier about this.
|
||||||
if paths == nil {
|
if p == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
locPaths = make([]string, len(paths))
|
locPaths = make([]string, len(p))
|
||||||
locPaths = paths[:] // Create an explicit copy so we don't modify paths.
|
locPaths = p[:] // Create an explicit copy so we don't modify p.
|
||||||
for i, p := range locPaths {
|
for i, p := range locPaths {
|
||||||
if exists, stat, err = RealPathExistsStat(&p); err != nil {
|
if exists, stat, err = RealPathExistsStat(&p); err != nil {
|
||||||
err = nil
|
err = nil
|
||||||
@ -151,6 +161,30 @@ func GetFirstWithRef(paths []string) (content []byte, isDir, ok bool, idx int) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Len returns the number of path segments in p, as split with the same param signature to Segment.
|
||||||
|
|
||||||
|
See Segment for details on abs and strict.
|
||||||
|
*/
|
||||||
|
func Len(p string, abs, strict bool) (segments int) {
|
||||||
|
|
||||||
|
segments = len(Segment(p, abs, strict))
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
LenSys returns the number of path segments in p, as split with the same param signature to SegmentSys.
|
||||||
|
|
||||||
|
See Segment for details on abs and strict.
|
||||||
|
*/
|
||||||
|
func LenSys(p string, abs, strict bool) (segments int) {
|
||||||
|
|
||||||
|
segments = len(SegmentSys(p, abs, strict))
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
MakeDirIfNotExist will create a directory at a given path if it doesn't exist.
|
MakeDirIfNotExist will create a directory at a given path if it doesn't exist.
|
||||||
|
|
||||||
@ -158,11 +192,11 @@ See also the documentation for RealPath.
|
|||||||
|
|
||||||
This is a bit more sane option than os.MkdirAll as it will normalize paths a little better.
|
This is a bit more sane option than os.MkdirAll as it will normalize paths a little better.
|
||||||
*/
|
*/
|
||||||
func MakeDirIfNotExist(path string) (err error) {
|
func MakeDirIfNotExist(p string) (err error) {
|
||||||
|
|
||||||
var stat os.FileInfo
|
var stat fs.FileInfo
|
||||||
var exists bool
|
var exists bool
|
||||||
var locPath string = path
|
var locPath string = p
|
||||||
|
|
||||||
if exists, stat, err = RealPathExistsStat(&locPath); err != nil {
|
if exists, stat, err = RealPathExistsStat(&locPath); err != nil {
|
||||||
if !exists {
|
if !exists {
|
||||||
@ -199,20 +233,86 @@ It is recommended to check err (if not nil) for an invalid path error. If this i
|
|||||||
path syntax/string itself is not supported on the runtime OS. This can be done via:
|
path syntax/string itself is not supported on the runtime OS. This can be done via:
|
||||||
|
|
||||||
if errors.Is(err, fs.ErrInvalid) {...}
|
if errors.Is(err, fs.ErrInvalid) {...}
|
||||||
|
|
||||||
|
RealPath is simply a wrapper around ExpandHome(path) and filepath.Abs(*path).
|
||||||
|
|
||||||
|
Note that RealPath does *not* resolve symlinks. Only RealPathExistsStatTarget does that.
|
||||||
*/
|
*/
|
||||||
func RealPath(path *string) (err error) {
|
func RealPath(p *string) (err error) {
|
||||||
|
|
||||||
if err = ExpandHome(path); err != nil {
|
if err = ExpandHome(p); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if *path, err = filepath.Abs(*path); err != nil {
|
if *p, err = filepath.Abs(*p); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
RealPathJoin combines RealPath with (path).Join.
|
||||||
|
|
||||||
|
If dst is nil, then p will be updated with the new value.
|
||||||
|
You probably don't want that.
|
||||||
|
*/
|
||||||
|
func RealPathJoin(p, dst *string, subPaths ...string) (err error) {
|
||||||
|
|
||||||
|
var newPath string
|
||||||
|
var realDst *string
|
||||||
|
|
||||||
|
if err = RealPath(p); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if dst == nil {
|
||||||
|
realDst = p
|
||||||
|
} else {
|
||||||
|
realDst = dst
|
||||||
|
}
|
||||||
|
|
||||||
|
newPath = path.Join(append([]string{*p}, subPaths...)...)
|
||||||
|
if err = RealPath(&newPath); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
*realDst = newPath
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
RealPathJoinSys combines RealPath with (path/filepath).Join.
|
||||||
|
|
||||||
|
If dst is nil, then path will be updated with the new value.
|
||||||
|
You probably don't want that.
|
||||||
|
*/
|
||||||
|
func RealPathJoinSys(p, dst *string, subPaths ...string) (err error) {
|
||||||
|
|
||||||
|
var newPath string
|
||||||
|
var realDst *string
|
||||||
|
|
||||||
|
if err = RealPath(p); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if dst == nil {
|
||||||
|
realDst = p
|
||||||
|
} else {
|
||||||
|
realDst = dst
|
||||||
|
}
|
||||||
|
|
||||||
|
newPath = filepath.Join(append([]string{*p}, subPaths...)...)
|
||||||
|
if err = RealPath(&newPath); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
*realDst = newPath
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
RealPathExists is like RealPath, but will also return a boolean as to whether the path
|
RealPathExists is like RealPath, but will also return a boolean as to whether the path
|
||||||
actually exists or not.
|
actually exists or not.
|
||||||
@ -229,13 +329,13 @@ See also the documentation for RealPath.
|
|||||||
|
|
||||||
In those cases, it may be preferable to use RealPathExistsStat and checking stat for nil.
|
In those cases, it may be preferable to use RealPathExistsStat and checking stat for nil.
|
||||||
*/
|
*/
|
||||||
func RealPathExists(path *string) (exists bool, err error) {
|
func RealPathExists(p *string) (exists bool, err error) {
|
||||||
|
|
||||||
if err = RealPath(path); err != nil {
|
if err = RealPath(p); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err = os.Stat(*path); err != nil {
|
if _, err = os.Stat(*p); err != nil {
|
||||||
if errors.Is(err, fs.ErrNotExist) {
|
if errors.Is(err, fs.ErrNotExist) {
|
||||||
err = nil
|
err = nil
|
||||||
}
|
}
|
||||||
@ -248,21 +348,492 @@ func RealPathExists(path *string) (exists bool, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
RealPathExistsStat is like RealPathExists except it will also return the os.FileInfo
|
RealPathExistsStat is like RealPathExists except it will also return the fs.FileInfo
|
||||||
for the path (assuming it exists).
|
for the path (assuming it exists).
|
||||||
|
|
||||||
If stat is nil, it is highly recommended to check err via the methods suggested
|
If stat is nil, it is highly recommended to check err via the methods suggested
|
||||||
in the documentation for RealPath and RealPathExists.
|
in the documentation for RealPath and RealPathExists.
|
||||||
*/
|
*/
|
||||||
func RealPathExistsStat(path *string) (exists bool, stat os.FileInfo, err error) {
|
func RealPathExistsStat(p *string) (exists bool, stat fs.FileInfo, err error) {
|
||||||
|
|
||||||
if exists, err = RealPathExists(path); err != nil {
|
if exists, err = RealPathExists(p); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if stat, err = os.Stat(*path); err != nil {
|
if !exists {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if stat, err = os.Stat(*p); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
RealPathExistsStatTarget is the only "RealPather" that will resolve p to the (final) *target* of p if p is a symlink.
|
||||||
|
|
||||||
|
If p is not a symlink but does exist, the tgt* will reflect the same as p*.
|
||||||
|
|
||||||
|
See WalkLink for details on relRoot and other assorted rules/logic (RealPathExistsStatTarget wraps WalkLink).
|
||||||
|
*/
|
||||||
|
func RealPathExistsStatTarget(p *string, relRoot string) (pExists, tgtExists, wasLink bool, pStat fs.FileInfo, tgtStat fs.FileInfo, err error) {
|
||||||
|
|
||||||
|
var tgts []string
|
||||||
|
|
||||||
|
if pExists, err = RealPathExists(p); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
tgtExists = pExists
|
||||||
|
if !pExists {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Can't use RealPathExistsStat because it calls os.Stat, not os.Lstat... thus defeating the purpose.
|
||||||
|
if pStat, err = os.Lstat(*p); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
tgtStat = pStat
|
||||||
|
|
||||||
|
wasLink = pStat.Mode().Type()&fs.ModeSymlink == fs.ModeSymlink
|
||||||
|
|
||||||
|
if wasLink {
|
||||||
|
if tgts, err = WalkLink(*p, relRoot); err != nil || tgts == nil || len(tgts) == 0 {
|
||||||
|
tgtExists = false
|
||||||
|
tgtStat = nil
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if tgtExists, tgtStat, err = RealPathExistsStat(&tgts[len(tgts)-1]); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
*p = tgts[len(tgts)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// SearchFsPaths gets a file/directory/etc. path list based on the provided criteria.
|
||||||
|
func SearchFsPaths(matcher FsSearchCriteria) (found, miss []*FsSearchResult, err error) {
|
||||||
|
|
||||||
|
var matched *FsSearchResult
|
||||||
|
var missed *FsSearchResult
|
||||||
|
|
||||||
|
if err = RealPath(&matcher.Root); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = filepath.WalkDir(
|
||||||
|
matcher.Root,
|
||||||
|
func(path string, d fs.DirEntry, inErr error) (outErr error) {
|
||||||
|
|
||||||
|
if inErr != nil {
|
||||||
|
outErr = inErr
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if matched, missed, outErr = matcher.Match(path, d, nil); outErr != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if matched != nil && !matcher.NoMatch {
|
||||||
|
found = append(found, matched)
|
||||||
|
}
|
||||||
|
if missed != nil && !matcher.NoMismatch {
|
||||||
|
miss = append(miss, missed)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
},
|
||||||
|
); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if found == nil || len(found) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// And sort them.
|
||||||
|
sort.Slice(
|
||||||
|
found,
|
||||||
|
func(i, j int) (isLess bool) {
|
||||||
|
isLess = found[i].Path < found[j].Path
|
||||||
|
return
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
SearchFsPathsAsync is exactly like SearchFsPaths, but dispatches off concurrent
|
||||||
|
workers for the filtering logic instead of performing iteratively/recursively.
|
||||||
|
It may, in some cases, be *slightly more* performant and *slightly less* in others.
|
||||||
|
Note that unlike SearchFsPaths, the results written to the
|
||||||
|
FsSearchCriteriaAsync.ResChan are not guaranteed to be in any predictable order.
|
||||||
|
|
||||||
|
All channels are expected to have already been initialized by the caller.
|
||||||
|
They will not be closed by this function.
|
||||||
|
*/
|
||||||
|
func SearchFsPathsAsync(matcher FsSearchCriteriaAsync) {
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var wgLocal sync.WaitGroup
|
||||||
|
var doneChan chan bool = make(chan bool, 1)
|
||||||
|
|
||||||
|
if matcher.ErrChan == nil {
|
||||||
|
panic(ErrNilErrChan)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if matcher.WG == nil {
|
||||||
|
matcher.ErrChan <- ErrNilWg
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
defer matcher.WG.Done()
|
||||||
|
|
||||||
|
if matcher.ResChan == nil && !matcher.NoMatch {
|
||||||
|
matcher.ErrChan <- ErrNilMatchChan
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if matcher.MismatchChan == nil && !matcher.NoMismatch {
|
||||||
|
matcher.ErrChan <- ErrNilMismatchChan
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = RealPath(&matcher.Root); err != nil {
|
||||||
|
matcher.ErrChan <- err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if matcher.Semaphore != nil && matcher.SemaphoreCtx == nil {
|
||||||
|
matcher.SemaphoreCtx = context.Background()
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = filepath.WalkDir(
|
||||||
|
matcher.Root,
|
||||||
|
func(path string, de fs.DirEntry, inErr error) (outErr error) {
|
||||||
|
|
||||||
|
if inErr != nil {
|
||||||
|
inErr = filterNoFileDir(inErr)
|
||||||
|
if inErr != nil {
|
||||||
|
outErr = inErr
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
wgLocal.Add(1)
|
||||||
|
if matcher.Semaphore != nil {
|
||||||
|
if err = matcher.Semaphore.Acquire(matcher.SemaphoreCtx, 1); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
go func(p string, d fs.DirEntry) {
|
||||||
|
var pErr error
|
||||||
|
var pResMatch *FsSearchResult
|
||||||
|
var pResMiss *FsSearchResult
|
||||||
|
|
||||||
|
defer wgLocal.Done()
|
||||||
|
|
||||||
|
if matcher.Semaphore != nil {
|
||||||
|
defer matcher.Semaphore.Release(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if pResMatch, pResMiss, pErr = matcher.Match(p, d, nil); pErr != nil {
|
||||||
|
matcher.ErrChan <- pErr
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if pResMatch != nil && !matcher.NoMatch {
|
||||||
|
matcher.ResChan <- pResMatch
|
||||||
|
}
|
||||||
|
if pResMiss != nil && !matcher.NoMismatch {
|
||||||
|
matcher.MismatchChan <- pResMiss
|
||||||
|
}
|
||||||
|
}(path, de)
|
||||||
|
|
||||||
|
return
|
||||||
|
},
|
||||||
|
); err != nil {
|
||||||
|
err = filterNoFileDir(err)
|
||||||
|
if err != nil {
|
||||||
|
matcher.ErrChan <- err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
wgLocal.Wait()
|
||||||
|
doneChan <- true
|
||||||
|
}()
|
||||||
|
|
||||||
|
<-doneChan
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Segment returns path p's segments as a slice of strings, using GenericSeparator as a separator.
|
||||||
|
|
||||||
|
If abs is true, the placeholder leading prefix(es) (if any) of GenericSeparator will be kept in-place;
|
||||||
|
otherwise it/they will be trimmed out.
|
||||||
|
e.g.:
|
||||||
|
|
||||||
|
abs == true: //foo/bar/baz => []string{"", "", "foo", "bar", "baz"}
|
||||||
|
abs == false: /foo/bar/baz => []string{"foo", "bar", "baz"}
|
||||||
|
|
||||||
|
If strict is true, any trailing GenericSeparator will be kept in-place;
|
||||||
|
otherwise they will be trimmed out.
|
||||||
|
e.g. (assuming abs == false):
|
||||||
|
|
||||||
|
strict == true: /foo/bar/baz// => []string{"foo", "bar", "baz", "", ""}
|
||||||
|
strict == false: /foo/bar/baz/ => []string{"foo", "bar", "baz"}
|
||||||
|
|
||||||
|
It is recommended to call RealPath for path's ptr first for normalization.
|
||||||
|
*/
|
||||||
|
func Segment(p string, abs, strict bool) (segments []string) {
|
||||||
|
|
||||||
|
if !abs {
|
||||||
|
p = strings.TrimLeft(p, string(GenericSeparator))
|
||||||
|
}
|
||||||
|
if !strict {
|
||||||
|
p = strings.TrimRight(p, string(GenericSeparator))
|
||||||
|
}
|
||||||
|
|
||||||
|
segments = strings.Split(p, string(GenericSeparator))
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// SegmentSys is exactly like Segment, except using os.PathSeparator instead of GenericSeparator.
|
||||||
|
func SegmentSys(p string, abs, strict bool) (segments []string) {
|
||||||
|
|
||||||
|
if !abs {
|
||||||
|
p = strings.TrimLeft(p, string(os.PathSeparator))
|
||||||
|
}
|
||||||
|
if !strict {
|
||||||
|
p = strings.TrimRight(p, string(os.PathSeparator))
|
||||||
|
}
|
||||||
|
|
||||||
|
segments = strings.Split(p, string(os.PathSeparator))
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Strip is like Segment but trims out the leading n number of segments and reassembles the path using path.Join.
|
||||||
|
|
||||||
|
n may be negative, in which case the *trailing* n number of segments will be trimmed out.
|
||||||
|
(i.e. n == -1, p == `foo/bar/baz/quux` would be `foo/bar/baz`, not `bar/baz/quux`)
|
||||||
|
|
||||||
|
If you require more traditional slicing (e.g. with interval),
|
||||||
|
you may want to use path.Join with a sliced result of Segment instead.
|
||||||
|
e.g.: *only* the *last* n segments: path.Join(Segment(p, ...)[Len(p, ...)-n:]...)
|
||||||
|
|
||||||
|
If n == 0 or int(math.Abs(float64(n))) >= len(Segment(p, ...)), no transformation will be done.
|
||||||
|
|
||||||
|
e.g.
|
||||||
|
|
||||||
|
n == 2: foo/bar/baz/foobar/quux => baz/foobar/quux
|
||||||
|
n == -2: foo/bar/baz/foobar/quux => foo/bar/baz
|
||||||
|
*/
|
||||||
|
func Strip(p string, abs, strict bool, n int) (slicedPath string) {
|
||||||
|
|
||||||
|
var pLen int
|
||||||
|
var absN int
|
||||||
|
var segments []string
|
||||||
|
|
||||||
|
segments = Segment(p, abs, strict)
|
||||||
|
pLen = len(segments)
|
||||||
|
|
||||||
|
absN = int(math.Abs(float64(n)))
|
||||||
|
|
||||||
|
if n == 0 || absN >= pLen {
|
||||||
|
slicedPath = p
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if n > 0 {
|
||||||
|
segments = segments[n:]
|
||||||
|
} else {
|
||||||
|
segments = segments[:pLen-absN]
|
||||||
|
}
|
||||||
|
|
||||||
|
slicedPath = path.Join(segments...)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// StripSys is exactly like Strip but using (path/filepath).Join and SegmentSys.
|
||||||
|
func StripSys(p string, abs, strict bool, n int) (slicedPath string) {
|
||||||
|
|
||||||
|
var pLen int
|
||||||
|
var absN int
|
||||||
|
var segments []string
|
||||||
|
|
||||||
|
segments = SegmentSys(p, abs, strict)
|
||||||
|
pLen = len(segments)
|
||||||
|
|
||||||
|
absN = int(math.Abs(float64(n)))
|
||||||
|
|
||||||
|
if n == 0 || absN >= pLen {
|
||||||
|
slicedPath = p
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if n > 0 {
|
||||||
|
segments = segments[n:]
|
||||||
|
} else {
|
||||||
|
segments = segments[:pLen-absN]
|
||||||
|
}
|
||||||
|
|
||||||
|
slicedPath = filepath.Join(segments...)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
WalkLink walks the recursive target(s) of lnk (unless/until MaxSymlinkLevel is hit, which will trigger ErrMaxSymlinkLevel)
|
||||||
|
until it reaches a real (non-symlink) target.
|
||||||
|
|
||||||
|
lnk will have RealPath called on it first.
|
||||||
|
|
||||||
|
If lnk is not a symlink, then tgts == []string{lnk} and err = nil.
|
||||||
|
|
||||||
|
A broken link will return fs.ErrNotExist, with tgts containing the targets up to and including the path that triggered the error.
|
||||||
|
|
||||||
|
If lnk itself does not exist, tgts will be nil and err will be that of fs.ErrNotExist.
|
||||||
|
|
||||||
|
relRoot is a root directory to resolve relative links to. If empty, relative link target `t` from link `l` will be treated
|
||||||
|
as relative to `(path/filepath).Dir(l)` (that is to say, `t = filepath.Join(filepath.Dir(l), os.Readlink(l))`).
|
||||||
|
*/
|
||||||
|
func WalkLink(lnk, relRoot string) (tgts []string, err error) {
|
||||||
|
|
||||||
|
var exists bool
|
||||||
|
var curDepth uint
|
||||||
|
var stat fs.FileInfo
|
||||||
|
var curTgt string
|
||||||
|
var prevTgt string
|
||||||
|
|
||||||
|
if exists, err = RealPathExists(&lnk); err != nil {
|
||||||
|
return
|
||||||
|
} else if !exists {
|
||||||
|
err = fs.ErrNotExist
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if relRoot != "" {
|
||||||
|
if err = RealPath(&relRoot); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tgts = []string{}
|
||||||
|
|
||||||
|
curTgt = lnk
|
||||||
|
for curDepth = 0; curDepth < MaxSymlinkLevel; curDepth++ {
|
||||||
|
if exists, err = RealPathExists(&curTgt); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
prevTgt = curTgt
|
||||||
|
tgts = append(tgts, curTgt)
|
||||||
|
if !exists {
|
||||||
|
err = fs.ErrNotExist
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if stat, err = os.Lstat(curTgt); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if stat.Mode().Type()&os.ModeSymlink != os.ModeSymlink {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if curTgt, err = os.Readlink(curTgt); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !filepath.IsAbs(curTgt) {
|
||||||
|
if relRoot != "" {
|
||||||
|
curTgt = filepath.Join(relRoot, curTgt)
|
||||||
|
} else {
|
||||||
|
curTgt = filepath.Join(filepath.Dir(prevTgt), curTgt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if curDepth >= MaxSymlinkLevel {
|
||||||
|
err = ErrMaxSymlinkLevel
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
filterTimes checks a times.Timespec of a file using:
|
||||||
|
- an age specified by the caller
|
||||||
|
- an ageType bitmask for types of times to compare
|
||||||
|
- an olderThan bool (if false, the file must be younger than)
|
||||||
|
- an optional "now" timestamp for the age derivation.
|
||||||
|
*/
|
||||||
|
func filterTimes(tspec times.Timespec, age *time.Duration, ageType *pathTimeType, olderThan bool, now *time.Time) (include bool) {
|
||||||
|
|
||||||
|
var curAge time.Duration
|
||||||
|
var mask *bitmask.MaskBit
|
||||||
|
var tfunc func(t *time.Duration) (match bool) = func(t *time.Duration) (match bool) {
|
||||||
|
if olderThan {
|
||||||
|
match = *t > *age
|
||||||
|
} else {
|
||||||
|
match = *t < *age
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if tspec == nil || age == nil || ageType == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
mask = ageType.Mask()
|
||||||
|
|
||||||
|
if now == nil {
|
||||||
|
now = new(time.Time)
|
||||||
|
*now = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BTIME (if supported)
|
||||||
|
if tspec.HasBirthTime() && (mask.HasFlag(bitmask.MaskBit(TimeAny)) || mask.HasFlag(bitmask.MaskBit(TimeCreated))) {
|
||||||
|
curAge = now.Sub(tspec.BirthTime())
|
||||||
|
if include = tfunc(&curAge); include {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// MTIME
|
||||||
|
if mask.HasFlag(bitmask.MaskBit(TimeAny)) || mask.HasFlag(bitmask.MaskBit(TimeModified)) {
|
||||||
|
curAge = now.Sub(tspec.ModTime())
|
||||||
|
if include = tfunc(&curAge); include {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// CTIME (if supported)
|
||||||
|
if tspec.HasChangeTime() && (mask.HasFlag(bitmask.MaskBit(TimeAny)) || mask.HasFlag(bitmask.MaskBit(TimeChanged))) {
|
||||||
|
curAge = now.Sub(tspec.ChangeTime())
|
||||||
|
if include = tfunc(&curAge); include {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// ATIME
|
||||||
|
if mask.HasFlag(bitmask.MaskBit(TimeAny)) || mask.HasFlag(bitmask.MaskBit(TimeAccessed)) {
|
||||||
|
curAge = now.Sub(tspec.AccessTime())
|
||||||
|
if include = tfunc(&curAge); include {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func filterNoFileDir(err error) (filtered error) {
|
||||||
|
|
||||||
|
filtered = err
|
||||||
|
if errors.Is(err, fs.ErrNotExist) {
|
||||||
|
filtered = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
125
paths/funcs_fssearchcriteria.go
Normal file
125
paths/funcs_fssearchcriteria.go
Normal file
@ -0,0 +1,125 @@
|
|||||||
|
package paths
|
||||||
|
|
||||||
|
import (
|
||||||
|
`io/fs`
|
||||||
|
`os`
|
||||||
|
`path/filepath`
|
||||||
|
`time`
|
||||||
|
|
||||||
|
`github.com/djherbis/times`
|
||||||
|
`r00t2.io/goutils/bitmask`
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
Match returns match (a ptr to a FsSearchResult if the specified path matches, otherwise nil),
|
||||||
|
miss (ptr the specified path does not match, otherwise nil), and an fs.DirEntry and fs.FileInfo
|
||||||
|
for path. d and/or fi may be nil.
|
||||||
|
|
||||||
|
If err is not nil, it represents an unexpected error and as such, both match and miss should be nil.
|
||||||
|
|
||||||
|
Match, miss, and err will all be nil if the filesystem object/path does not exist.
|
||||||
|
*/
|
||||||
|
func (f *FsSearchCriteria) Match(path string, d fs.DirEntry, fi fs.FileInfo) (match, miss *FsSearchResult, err error) {
|
||||||
|
|
||||||
|
var typeMode fs.FileMode
|
||||||
|
var m FsSearchResult
|
||||||
|
var typeFilter *bitmask.MaskBit = bitmask.NewMaskBitExplicit(uint(f.TargetType))
|
||||||
|
|
||||||
|
m = FsSearchResult{
|
||||||
|
Path: path,
|
||||||
|
DirEntry: d,
|
||||||
|
FileInfo: fi,
|
||||||
|
Criteria: f,
|
||||||
|
}
|
||||||
|
|
||||||
|
if f == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// A DirEntry can be created from a FileInfo but not vice versa.
|
||||||
|
if m.FileInfo == nil {
|
||||||
|
if m.DirEntry != nil {
|
||||||
|
if m.FileInfo, err = m.DirEntry.Info(); err != nil {
|
||||||
|
err = filterNoFileDir(err)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if f.FollowSymlinks {
|
||||||
|
if m.FileInfo, err = os.Stat(path); err != nil {
|
||||||
|
err = filterNoFileDir(err)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if m.FileInfo, err = os.Lstat(path); err != nil {
|
||||||
|
err = filterNoFileDir(err)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m.DirEntry = fs.FileInfoToDirEntry(m.FileInfo)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if m.DirEntry == nil {
|
||||||
|
m.DirEntry = fs.FileInfoToDirEntry(m.FileInfo)
|
||||||
|
}
|
||||||
|
if m.DirEntry == nil || m.FileInfo == nil {
|
||||||
|
m.MissReason = MissNoMeta
|
||||||
|
miss = &m
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.Times, err = times.Stat(path); err != nil {
|
||||||
|
err = filterNoFileDir(err)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.PathPtrn != nil && !f.PathPtrn.MatchString(path) {
|
||||||
|
m.MissReason = MissBadPath
|
||||||
|
miss = &m
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if f.BasePtrn != nil && !f.BasePtrn.MatchString(filepath.Base(path)) {
|
||||||
|
m.MissReason = MissBadBase
|
||||||
|
miss = &m
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// age
|
||||||
|
if f.Age != nil {
|
||||||
|
if f.Now == nil {
|
||||||
|
f.Now = new(time.Time)
|
||||||
|
*f.Now = time.Now()
|
||||||
|
}
|
||||||
|
if !filterTimes(m.Times, f.Age, &f.AgeType, f.OlderThan, f.Now) {
|
||||||
|
m.MissReason = MissBadTime
|
||||||
|
miss = &m
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// fs object type (file, dir, etc.)
|
||||||
|
typeMode = m.FileInfo.Mode().Type()
|
||||||
|
if typeMode == 0 && f.NoFiles {
|
||||||
|
m.MissReason = MissFile
|
||||||
|
miss = &m
|
||||||
|
return
|
||||||
|
} else if typeMode != 0 {
|
||||||
|
if !typeFilter.IsOneOf(bitmask.MaskBit(typeMode)) {
|
||||||
|
m.MissReason = MissType
|
||||||
|
miss = &m
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If it gets to here, it matches.
|
||||||
|
match = &m
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
13
paths/funcs_pathtimetype.go
Normal file
13
paths/funcs_pathtimetype.go
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
package paths
|
||||||
|
|
||||||
|
import (
|
||||||
|
`r00t2.io/goutils/bitmask`
|
||||||
|
)
|
||||||
|
|
||||||
|
// Mask returns a bitmask.MaskBit from a pathTimeType.
|
||||||
|
func (p *pathTimeType) Mask() (mask *bitmask.MaskBit) {
|
||||||
|
|
||||||
|
mask = bitmask.NewMaskBitExplicit(uint(*p))
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
136
paths/types.go
Normal file
136
paths/types.go
Normal file
@ -0,0 +1,136 @@
|
|||||||
|
package paths
|
||||||
|
|
||||||
|
import (
|
||||||
|
`context`
|
||||||
|
`io/fs`
|
||||||
|
`regexp`
|
||||||
|
`sync`
|
||||||
|
`time`
|
||||||
|
|
||||||
|
`github.com/djherbis/times`
|
||||||
|
`golang.org/x/sync/semaphore`
|
||||||
|
`r00t2.io/goutils/bitmask`
|
||||||
|
)
|
||||||
|
|
||||||
|
// FsSearchCriteria contains filter criteria for SearchFsPaths* functions.
|
||||||
|
type FsSearchCriteria struct {
|
||||||
|
// Root indicates the root to search.
|
||||||
|
Root string `json:"root" toml:"RootPath" yaml:"Root Path" xml:"root,attr" validate:"dir"`
|
||||||
|
// NoMatch, if true, will not return matches. If NoMatch and NoMismatch are both true, no results will be returned.
|
||||||
|
NoMatch bool `json:"no_match" toml:"NoMatch" yaml:"No Matches" xml:"noMatch,attr"`
|
||||||
|
// NoMismatch, if true, will not return mismatches. If NoMatch and NoMismatch are both true, no results will be returned.
|
||||||
|
NoMismatch bool `json:"no_miss" toml:"NoMismatch" yaml:"No Mismatches" xml:"noMiss,attr"`
|
||||||
|
/*
|
||||||
|
TargetType defines what types of filesystem objects should be matched.
|
||||||
|
It can consist of one or more (io/)fs.FileMode types OR'd together
|
||||||
|
(ensure they are part of (io/)fs.ModeType).
|
||||||
|
(You can use 0 to match regular files explicitly, and/or NoFiles = true to exclude them.)
|
||||||
|
*/
|
||||||
|
TargetType fs.FileMode `json:"type_tgt" toml:"TargetType" yaml:"Target Type" xml:"typeTgt,attr"`
|
||||||
|
// NoFiles excludes files from TargetType-matching (as there isn't a way to explicitly exclude files otherwise if a non-zero mode is given).
|
||||||
|
NoFiles bool `json:"no_file" toml:"ExcludeFiles" yaml:"Exclude Files" xml:"noFile,attr"`
|
||||||
|
// FollowSymlinks, if true and a path being tested is a symlink, will use metadata (age, etc.) of the symlink itself rather than the link target.
|
||||||
|
FollowSymlinks bool `json:"follow_sym" toml:"FollowSymlinks" yaml:"Follow Symlinks" xml:"followSym,attr"`
|
||||||
|
// BasePtrn, if specified, will apply to the *base name (that is, quux.txt rather than /foo/bar/baz/quux.txt). See also PathPtrn.
|
||||||
|
BasePtrn *regexp.Regexp `json:"ptrn_base,omitempty" toml:"BaseNamePattern,omitempty" yaml:"Base Name Pattern,omitempty" xml:"ptrnBase,attr,omitempty"`
|
||||||
|
// PathPtrn, if specified, will apply to the *full path* (e.g. /foo/bar/baz/quux.txt, not just quux.txt). See also BasePtrn.
|
||||||
|
PathPtrn *regexp.Regexp `json:"ptrn_path,omitempty" toml:"PathPattern,omitempty" yaml:"Path Pattern,omitempty" xml:"ptrnPath,attr,omitempty"`
|
||||||
|
/*
|
||||||
|
Age, if specified, indicates the comparison of Now againt the AgeType of filesystem objects.
|
||||||
|
Use OlderThan to indicate if it should be older or newer.
|
||||||
|
*/
|
||||||
|
Age *time.Duration `json:"age,omitempty" toml:"Age,omitempty" yaml:"Age,omitempty" xml:"age,attr,omitempty"`
|
||||||
|
/*
|
||||||
|
AgeType can be one (or more, OR'd together) of the Time* constants in this package (TimeAny, TimeAccessed, TimeCreated,
|
||||||
|
TimeChanged, TimeModified) to indicate what timestamp(s) to use for comparing Age.
|
||||||
|
|
||||||
|
The zero-value is TimeAny.
|
||||||
|
|
||||||
|
The first matching timestamp will pass all time comparisons.
|
||||||
|
Be mindful of timestamp type support/limitations per OS/filesystem of Root.
|
||||||
|
|
||||||
|
Completely unused if Age is nil.
|
||||||
|
*/
|
||||||
|
AgeType pathTimeType `json:"type_age" toml:"AgeType" yaml:"Age Type" xml:"typeAge,attr"`
|
||||||
|
/*
|
||||||
|
OlderThan, if true (and Age is not nil), indicates that matching filesystem objects should have their
|
||||||
|
AgeType older than Now. If false, their AgeType should be *newer* than Now.
|
||||||
|
|
||||||
|
Completely unused if Age is nil.
|
||||||
|
*/
|
||||||
|
OlderThan bool `json:"older" toml:"OlderThan" yaml:"Older Than" xml:"older,attr"`
|
||||||
|
/*
|
||||||
|
Now expresses a time to compare to Age via AgeType and OlderThan.
|
||||||
|
Note that it may be any valid time, not necessarily "now".
|
||||||
|
If Age is specified but Now is nil, it will be populated with time.Now() when the search is invoked.
|
||||||
|
|
||||||
|
Completely unused if Age is nil.
|
||||||
|
*/
|
||||||
|
Now *time.Time `json:"now,omitempty" toml:"Now,omitempty" yaml:"Now,omitempty" xml:"now,attr,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// FsSearchCriteriaAsync extends FsSearchCriteria for use in an asynchronous (goroutine) manner.
|
||||||
|
type FsSearchCriteriaAsync struct {
|
||||||
|
FsSearchCriteria
|
||||||
|
/*
|
||||||
|
WG should be a non-nil pointer to a sync.WaitGroup.
|
||||||
|
This is used to manage searching completion to the caller.
|
||||||
|
|
||||||
|
.Done() will be called once within the search function, but no .Add() will be called;
|
||||||
|
.Add() should be done by the caller beforehand.
|
||||||
|
*/
|
||||||
|
WG *sync.WaitGroup
|
||||||
|
// ResChan must be a non-nil channel for (positive) match results to be sent to.
|
||||||
|
ResChan chan *FsSearchResult
|
||||||
|
// MismatchChan, if not nil, will have negative matches/"misses" sent to it.
|
||||||
|
MismatchChan chan *FsSearchResult
|
||||||
|
/*
|
||||||
|
ErrChan should be a non-nil error channel for any unexpected errors encountered.
|
||||||
|
|
||||||
|
If nil, a panic will be raised.
|
||||||
|
*/
|
||||||
|
ErrChan chan error
|
||||||
|
/*
|
||||||
|
Semaphore is completely optional, but if non-nil
|
||||||
|
it will be used to limit concurrent filesystem
|
||||||
|
object processing.
|
||||||
|
|
||||||
|
It is generally a Very Good Idea(TM) to use this,
|
||||||
|
as the default is to dispatch all processing concurrently.
|
||||||
|
This can lead to some heavy I/O and CPU wait.
|
||||||
|
|
||||||
|
(See https://pkg.go.dev/golang.org/x/sync/semaphore for details.)
|
||||||
|
*/
|
||||||
|
Semaphore *semaphore.Weighted
|
||||||
|
/*
|
||||||
|
SemaphoreCtx is the context.Context to use for Semaphore.
|
||||||
|
If nil (but Sempaphore is not), one will be created locally/internally.
|
||||||
|
*/
|
||||||
|
SemaphoreCtx context.Context
|
||||||
|
}
|
||||||
|
|
||||||
|
// FsSearchResult contains a match/miss result for FsSearchCriteria and FsSearchCriteriaAsync.
type FsSearchResult struct {
	/*
		Path is the path to the object on the filesystem.
		It may or may not exist at the time of return,
		but will not be an empty string.
	*/
	Path string `json:"path" toml:"Path" yaml:"Path" xml:"path,attr"`
	// DirEntry is the fs.DirEntry for the Path; note that .Name() is the base name only. TODO: serialization?
	DirEntry fs.DirEntry `json:"-" toml:"-" yaml:"-" xml:"-"`
	// FileInfo is the fs.FileInfo for the Path; note that .Name() is the base name only. TODO: serialization?
	FileInfo fs.FileInfo `json:"-" toml:"-" yaml:"-" xml:"-"`
	// Criteria is the evaluated criteria specified that this FsSearchResult matched.
	Criteria *FsSearchCriteria `json:"criteria" toml:"Criteria" yaml:"Criteria" xml:"criteria"`
	// Times holds the mtime, ctime, etc. of the filesystem object (where supported). TODO: serialization?
	Times times.Timespec `json:"-" toml:"-" yaml:"-" xml:"-"`
	// MissReason contains the reason the result is a miss (MissNoMiss if a match); see the Miss* constants.
	MissReason missReason `json:"miss_reason" toml:"MissReason" yaml:"Miss Reason" xml:"miss,attr"`
}
|
||||||
|
|
||||||
|
// missReason is a keyword explaining why a search result was a miss; see the Miss* constants.
type missReason string

// pathMode is a bitmask over filesystem object modes/types; presumably used by search
// criteria to select object kinds — TODO confirm against the criteria definitions.
type pathMode bitmask.MaskBit

// pathTimeType is a bitmask presumably selecting which timestamp(s) (mtime, ctime, etc.)
// a time-based criterion applies to — TODO confirm against the criteria definitions.
type pathTimeType bitmask.MaskBit
|
27
pdsh/docs.go
Normal file
27
pdsh/docs.go
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
/*
|
||||||
|
Package pdsh (!! WIP !!) provides PDSH-compatible functionality for parsing group/gender/etc. files/sources.
|
||||||
|
|
||||||
|
Note that this library will *only* generate the host list/etc.,
|
||||||
|
it will not actually connect to anything.
|
||||||
|
It simply provides ways of returning lists of hosts using generation rules/patterns.
|
||||||
|
Said another way, it does not implement any of PDSH's "rcmd" modules, only the "misc" modules.
|
||||||
|
|
||||||
|
(As a hint, you can implement SSH connections via [golang.org/x/crypto/ssh] in goroutine'd functions
|
||||||
|
using this package to generate the target addresses, etc.)
|
||||||
|
|
||||||
|
Currently, the only supported PDSH module is misc/dshgroup (as [r00t2.io/sysutils/pdsh/dshgroup]) but additional/all other
|
||||||
|
host list modules are planned.
|
||||||
|
|
||||||
|
This package deviates slightly from PDSH in some areas; allowing for more loose or more strict behavior occasionally.
|
||||||
|
Whenever a deviation is offered, this package allows for configuring the generator to behave exactly like PDSH instead
|
||||||
|
(if the deviating behavior is enabled by default).
|
||||||
|
|
||||||
|
For details, see the [chaos/pdsh GitHub], the associated [MAN page source], and/or the [rendered MAN page] (via ManKier).
|
||||||
|
You may also want to see the ManKier rendered MAN pages for the [pdsh package].
|
||||||
|
|
||||||
|
[chaos/pdsh GitHub]: https://github.com/chaos/pdsh/
|
||||||
|
[MAN page source]: https://github.com/chaos/pdsh/blob/master/doc/pdsh.1.in
|
||||||
|
[rendered MAN page]: https://www.mankier.com/1/pdsh
|
||||||
|
[pdsh package]: https://www.mankier.com/package/pdsh
|
||||||
|
*/
|
||||||
|
package pdsh
|
18
pdsh/dshgroup/consts.go
Normal file
18
pdsh/dshgroup/consts.go
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
package dshgroup

import (
	"regexp"

	"r00t2.io/goutils/remap"
)

const (
	// dshGrpPathEnv is the environment variable holding an os.PathListSeparator-separated
	// list of group directories (see DshGroupLister.Evaluate).
	dshGrpPathEnv string = "DSHGROUP_PATH"
)

// DSH Groups
var (
	// dshGrpDefGrpDir is the default group directory, used when DSHGROUP_PATH is not defined.
	dshGrpDefGrpDir string = "/etc/dsh/group"
	// dshGrpInclPtrn matches the PDSH `#include <target>` extension lines, capturing the target as "incl".
	dshGrpInclPtrn *remap.ReMap = &remap.ReMap{Regexp: regexp.MustCompile(`^\s*#include\s+(?P<incl>.+)$`)}
	// dshGrpSubTokenPtrn parses a single range subtoken (e.g. "004-009" or "15"),
	// capturing leading zero-padding ("start_pad"/"end_pad") and the numeric bounds ("start"/"end").
	dshGrpSubTokenPtrn *remap.ReMap = &remap.ReMap{Regexp: regexp.MustCompile(`^(?P<start_pad>0+)?(?P<start>[0-9]+)(-(?P<end_pad>0+)?(?P<end>[0-9]+))?$`)}
)
|
30
pdsh/dshgroup/docs.go
Normal file
30
pdsh/dshgroup/docs.go
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
/*
|
||||||
|
Package dshgroup implements so-called "DSH (Dancer's SHell) Group" files.
|
||||||
|
|
||||||
|
It is equivalent to PDSH's [misc/dshgroup] module. ([source])
|
||||||
|
|
||||||
|
Be sure to read the [HOSTLIST EXPRESSIONS] section in the MAN page.
|
||||||
|
|
||||||
|
# Notable Differences
|
||||||
|
|
||||||
|
* This package allows for *never* reading the DSHGROUP_PATH env var (PDSH always reads it) via the "NoEnv" option.
|
||||||
|
|
||||||
|
* This package allows for not adding /etc/dsh/group/<group> files by default via the "NoDefault" option.
|
||||||
|
|
||||||
|
* This package allows for not adding ~/.dsh/group/<group> files by default via the "NoHome" option.
|
||||||
|
|
||||||
|
* This package allows for a "ForceLegacy" mode, disabled by default, that DISABLES the PDSH
|
||||||
|
extension for "#include <path/group>" extension.
|
||||||
|
If ForceLegacy is enabled, "#include ..." lines will be treated as comment lines (ignored) instead.
|
||||||
|
|
||||||
|
* This package allows for whitespace between group patterns. This can be disabled by the "StrictWhitespace" option.
|
||||||
|
|
||||||
|
# TODO/WIP/Not Yet Implemented
|
||||||
|
|
||||||
|
This package is not yet complete.
|
||||||
|
|
||||||
|
[misc/dshgroup]: https://www.mankier.com/1/pdsh#dshgroup_module_options
|
||||||
|
[source]: https://github.com/chaos/pdsh/blob/master/src/modules/dshgroup.c
|
||||||
|
[HOSTLIST EXPRESSIONS]: https://www.mankier.com/1/pdsh#Hostlist_Expressions
|
||||||
|
*/
|
||||||
|
package dshgroup
|
11
pdsh/dshgroup/errs.go
Normal file
11
pdsh/dshgroup/errs.go
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
package dshgroup

import (
	"errors"
)

// Sentinel errors returned by the dshgroup parsers.
var (
	// ErrEmptyDshGroupTok indicates an empty (all-whitespace) token inside a `[...]` range specifier.
	ErrEmptyDshGroupTok error = errors.New("empty dsh group pattern token")
	// ErrInvalidDshGrpSyntax indicates a malformed group file/pattern structure (e.g. unbalanced brackets).
	ErrInvalidDshGrpSyntax error = errors.New("invalid dsh group file syntax")
	// ErrInvalidDshGrpPtrn indicates an invalid hostlist pattern (e.g. an illegal character or range subtoken).
	ErrInvalidDshGrpPtrn error = errors.New("invalid dsh group pattern syntax")
)
|
176
pdsh/dshgroup/funcs_dshgrouplister.go
Normal file
176
pdsh/dshgroup/funcs_dshgrouplister.go
Normal file
@ -0,0 +1,176 @@
|
|||||||
|
package dshgroup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/fs"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"r00t2.io/sysutils/envs"
|
||||||
|
"r00t2.io/sysutils/paths"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
Evaluate returns a list of directories and files that would be searched/read with
|
||||||
|
the given call and DshGroupLister configuration, in order of parsing.
|
||||||
|
|
||||||
|
The behavior is the same as DshGroupLister.GroupedHosts, including searchPaths.
|
||||||
|
If DshGroupLister.ForceLegacy is false, include files will also be parsed in.
|
||||||
|
(This may incur slightly additional processing time.)
|
||||||
|
|
||||||
|
Only existing dirs/files are returned. Symlinks are evaluated to their target.
|
||||||
|
|
||||||
|
If dedupe is true, deduplication is performed. This adds some cycles, but may be desired if you make heavy use of symlinks.
|
||||||
|
*/
|
||||||
|
func (d *DshGroupLister) Evaluate(dedupe bool, searchPaths ...string) (dirs, files []string, err error) {
|
||||||
|
|
||||||
|
var exists bool
|
||||||
|
// var u *user.User
|
||||||
|
var spl []string
|
||||||
|
var dPath string
|
||||||
|
var fPath string
|
||||||
|
var incls []string
|
||||||
|
var de fs.DirEntry
|
||||||
|
var stat fs.FileInfo
|
||||||
|
var entries []fs.DirEntry
|
||||||
|
var tmpF []string
|
||||||
|
var fpathMap map[string]bool = make(map[string]bool)
|
||||||
|
|
||||||
|
// TODO: Does/how does pdsh resolve relative symlinks?
|
||||||
|
|
||||||
|
// Dirs first
|
||||||
|
if searchPaths != nil {
|
||||||
|
for _, dPath = range searchPaths {
|
||||||
|
if _, exists, _, _, stat, err = paths.RealPathExistsStatTarget(&dPath, "."); err != nil {
|
||||||
|
return
|
||||||
|
} else if !exists {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !stat.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
dirs = append(dirs, dPath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !d.NoHome && envs.HasEnv("HOME") {
|
||||||
|
// So pdsh actually checks $HOME, it doesn't pull the homedir for the user.
|
||||||
|
/*
|
||||||
|
if u, err = user.Current(); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
dPath = filepath.Join(u.HomeDir, ".dsh", "group")
|
||||||
|
*/
|
||||||
|
dPath = filepath.Join(os.Getenv("HOME"), ".dsh", "group")
|
||||||
|
if _, exists, _, _, stat, err = paths.RealPathExistsStatTarget(&dPath, "."); err != nil {
|
||||||
|
return
|
||||||
|
} else if exists {
|
||||||
|
if stat.IsDir() {
|
||||||
|
dirs = append(dirs, dPath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !d.NoEnv && envs.HasEnv(dshGrpPathEnv) {
|
||||||
|
spl = strings.Split(os.Getenv(dshGrpPathEnv), string(os.PathListSeparator))
|
||||||
|
for _, dPath = range spl {
|
||||||
|
if strings.TrimSpace(dPath) == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, exists, _, _, stat, err = paths.RealPathExistsStatTarget(&dPath, "."); err != nil {
|
||||||
|
return
|
||||||
|
} else if !exists {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !stat.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
dirs = append(dirs, dPath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !d.NoDefault && !envs.HasEnv(dshGrpPathEnv) {
|
||||||
|
dPath = dshGrpDefGrpDir
|
||||||
|
if _, exists, _, _, stat, err = paths.RealPathExistsStatTarget(&dPath, "."); err != nil {
|
||||||
|
return
|
||||||
|
} else if exists {
|
||||||
|
if stat.IsDir() {
|
||||||
|
dirs = append(dirs, dPath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Then files. Do *not* walk the dirs; only first-level is parsed by pdsh so this does the same.
|
||||||
|
for _, dPath = range dirs {
|
||||||
|
if entries, err = os.ReadDir(dPath); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, de = range entries {
|
||||||
|
fPath = filepath.Join(dPath, de.Name())
|
||||||
|
// NORMALLY, os.Stat calls stat(2), which follows symlinks. (os.Lstat()/lstat(2) does not.)
|
||||||
|
// But the stat for an fs.DirEntry? Uses lstat.
|
||||||
|
// Whatever, we want to resolve symlinks anyways.
|
||||||
|
if _, exists, _, _, stat, err = paths.RealPathExistsStatTarget(&fPath, "."); err != nil {
|
||||||
|
return
|
||||||
|
} else if exists {
|
||||||
|
if !stat.Mode().IsRegular() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if dedupe {
|
||||||
|
if _, exists = fpathMap[fPath]; !exists {
|
||||||
|
fpathMap[fPath] = true
|
||||||
|
files = append(files, fPath)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
files = append(files, fPath)
|
||||||
|
}
|
||||||
|
if !d.ForceLegacy {
|
||||||
|
if incls, err = getDshGrpIncludes(fPath); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if dedupe {
|
||||||
|
for _, i := range incls {
|
||||||
|
if _, exists = fpathMap[i]; !exists {
|
||||||
|
fpathMap[i] = true
|
||||||
|
files = append(files, i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
files = append(files, incls...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
files = tmpF
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
GroupedHosts returns a map of `map[<GROUP>][]string{<HOST>[, <HOST>, ...]}.

Additional search paths may be specified via searchpaths.

If there are any conflicting group names, the first found group name is used.
For example, assuming the group name `<GROUP>`, the following files will be checked in this order:

	0. IF searchPaths is not nil:
		a. searchpaths[0]/<GROUP>
		b. searchpaths[1]/<GROUP>
		c. searchpaths[2]/<GROUP>
		d. ( ... )
	1. IF DshGroupLister.NoHome is false:
		a. `~/.dsh/group/<GROUP>`
	2. IF $DSHGROUP_PATH is defined AND DshGroupLister.NoEnv is false:
		a. `strings.Split(os.Getenv("DSHGROUP_PATH", string(os.PathListSeparator)))[0]/<GROUP>`
		b. `strings.Split(os.Getenv("DSHGROUP_PATH", string(os.PathListSeparator)))[1]/<GROUP>`
		c. `strings.Split(os.Getenv("DSHGROUP_PATH", string(os.PathListSeparator)))[2]/<GROUP>`
		d. ( ... )
	3. IF $DSHGROUP_PATH is NOT defined AND DshGroupLister.NoDefault is false:
		a. `/etc/dsh/group/<GROUP>`
*/
func (d *DshGroupLister) GroupedHosts(dedupe bool, searchPaths ...string) (groupedHosts map[string][]string, err error) {

	// TODO: not yet implemented; currently returns a nil map and a nil error.

	return
}
|
309
pdsh/dshgroup/funcs_dshgrp.go
Normal file
309
pdsh/dshgroup/funcs_dshgrp.go
Normal file
@ -0,0 +1,309 @@
|
|||||||
|
package dshgroup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"r00t2.io/sysutils/paths"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
ParseDshPtrn parses ptrn using the DSH group pattern ptrn as according to `HOSTLIST EXPRESSSIONS` in pdsh(1).
|
||||||
|
`#include` directives are explicitly skipped; this only parses actual generation pattern strings.
|
||||||
|
|
||||||
|
The returning generator may either be iterated over with `range` or have `Hosts()` called explicitly. // TODO
|
||||||
|
*/
|
||||||
|
func ParseDshPtrn(ptrn string) (generator *DshGrpGenerator, err error) {
|
||||||
|
|
||||||
|
var r rune
|
||||||
|
var pos int
|
||||||
|
var s string
|
||||||
|
var inToken bool
|
||||||
|
var tokStr string
|
||||||
|
var tok dshGrpToken
|
||||||
|
var strBuf *bytes.Buffer = new(bytes.Buffer)
|
||||||
|
var tokBuf *bytes.Buffer = new(bytes.Buffer)
|
||||||
|
|
||||||
|
// TODO: users can be specified per-pattern.
|
||||||
|
|
||||||
|
generator = &DshGrpGenerator{
|
||||||
|
tokens: make([]dshGrpToken, 0),
|
||||||
|
tokenized: make([]string, 0),
|
||||||
|
text: ptrn,
|
||||||
|
}
|
||||||
|
|
||||||
|
s = strings.TrimSpace(ptrn)
|
||||||
|
if s == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(s, "#") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// A quick sanity check. The end-state from the state machine below will catch any weird bracket issues beyond this.
|
||||||
|
if strings.Count(s, "[") != strings.Count(s, "]") {
|
||||||
|
err = ErrInvalidDshGrpSyntax
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now the hacky bits. We read until we get to a start-token ('['), end-token (']'), or a pattern separator (',') that is *outside* a range token.
|
||||||
|
for pos, r = range s {
|
||||||
|
switch r {
|
||||||
|
case '[':
|
||||||
|
if inToken {
|
||||||
|
// Nested [...[
|
||||||
|
err = &PtrnParseErr{
|
||||||
|
pos: uint(pos),
|
||||||
|
ptrn: ptrn,
|
||||||
|
r: r,
|
||||||
|
err: ErrInvalidDshGrpSyntax,
|
||||||
|
inToken: inToken,
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
generator.tokenized = append(generator.tokenized, strBuf.String())
|
||||||
|
strBuf.Reset()
|
||||||
|
inToken = true
|
||||||
|
case ']':
|
||||||
|
if !inToken {
|
||||||
|
// Nested ]...]
|
||||||
|
err = &PtrnParseErr{
|
||||||
|
pos: uint(pos),
|
||||||
|
ptrn: ptrn,
|
||||||
|
r: r,
|
||||||
|
err: ErrInvalidDshGrpSyntax,
|
||||||
|
inToken: inToken,
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
tokStr = tokBuf.String()
|
||||||
|
if tok, err = parseDshGrpToken(tokStr); err != nil {
|
||||||
|
err = &PtrnParseErr{
|
||||||
|
pos: uint(pos),
|
||||||
|
ptrn: ptrn,
|
||||||
|
r: r,
|
||||||
|
err: err,
|
||||||
|
inToken: inToken,
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
generator.tokens = append(generator.tokens, tok)
|
||||||
|
tokBuf.Reset()
|
||||||
|
// Don't forget the empty element placeholder.
|
||||||
|
generator.tokenized = append(generator.tokenized, "")
|
||||||
|
inToken = false
|
||||||
|
default:
|
||||||
|
if inToken {
|
||||||
|
// If it isn't between '0' and '9', isn't '-', and isn't ','...
|
||||||
|
if !(0x30 <= r && r <= 0x39) && (r != 0x2d) && (r != 0x2c) {
|
||||||
|
// It's not a valid token. (The actual syntax is validated in parseDshGrpToken and parseDshGrpSubtoken)
|
||||||
|
err = &PtrnParseErr{
|
||||||
|
pos: uint(pos),
|
||||||
|
ptrn: ptrn,
|
||||||
|
r: r,
|
||||||
|
err: ErrInvalidDshGrpSyntax,
|
||||||
|
inToken: inToken,
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
tokBuf.WriteRune(r)
|
||||||
|
} else {
|
||||||
|
// TODO: confirm if inline comments and/or trailing/leading whitespace are handled by pdsh?
|
||||||
|
if strings.TrimSpace(string(r)) == "" || r == '#' {
|
||||||
|
// Whitespace is "invalid" (treat it as the end of the pattern).
|
||||||
|
// Same for end-of-line octothorpes.
|
||||||
|
if tokBuf.Len() > 0 {
|
||||||
|
// This should never happen.
|
||||||
|
err = &PtrnParseErr{
|
||||||
|
pos: uint(pos),
|
||||||
|
ptrn: ptrn,
|
||||||
|
r: r,
|
||||||
|
err: ErrInvalidDshGrpSyntax,
|
||||||
|
inToken: inToken,
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if strBuf.Len() > 0 {
|
||||||
|
generator.tokenized = append(generator.tokenized, strBuf.String())
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Otherwise we just check for valid DNS chars.
|
||||||
|
if !(0x30 <= r && r <= 0x39) && // '0'-'9'
|
||||||
|
(r != 0x2d) && // '-'
|
||||||
|
(r != 0x2e) && // '.'
|
||||||
|
!(0x41 <= r && r <= 0x5a) && // 'A' through 'Z' (inclusive)
|
||||||
|
!(0x61 <= r && r <= 0x7a) { // 'a' through 'z' (inclusive)
|
||||||
|
err = &PtrnParseErr{
|
||||||
|
pos: uint(pos),
|
||||||
|
ptrn: ptrn,
|
||||||
|
r: r,
|
||||||
|
err: ErrInvalidDshGrpPtrn,
|
||||||
|
inToken: inToken,
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// (Probably) valid(-ish), so add it.
|
||||||
|
strBuf.WriteRune(r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the token never closed, it's also invalid.
|
||||||
|
if inToken {
|
||||||
|
err = ErrInvalidDshGrpSyntax
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseDshGrpToken parses a token string into a dshGrpToken.
|
||||||
|
func parseDshGrpToken(tokenStr string) (token dshGrpToken, err error) {
|
||||||
|
|
||||||
|
var s string
|
||||||
|
var st []string
|
||||||
|
var sub dshGrpSubtoken
|
||||||
|
|
||||||
|
s = strings.TrimSpace(tokenStr)
|
||||||
|
if s == "" {
|
||||||
|
err = ErrEmptyDshGroupTok
|
||||||
|
return
|
||||||
|
}
|
||||||
|
st = strings.Split(s, ",")
|
||||||
|
token = dshGrpToken{
|
||||||
|
token: tokenStr,
|
||||||
|
subtokens: make([]dshGrpSubtoken, 0, len(st)),
|
||||||
|
}
|
||||||
|
for _, s = range st {
|
||||||
|
if strings.TrimSpace(s) == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if sub, err = parseDshGrpSubtoken(s); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
token.subtokens = append(token.subtokens, sub)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseDshGrpSubtoken parses a subtoken string into a dshGrpSubtoken.
|
||||||
|
func parseDshGrpSubtoken(subTokenStr string) (subtoken dshGrpSubtoken, err error) {
|
||||||
|
|
||||||
|
var u64 uint64
|
||||||
|
var vals []string
|
||||||
|
var endPad string
|
||||||
|
var startPad string
|
||||||
|
var st dshGrpSubtoken
|
||||||
|
var matches map[string][]string
|
||||||
|
|
||||||
|
if matches = dshGrpSubTokenPtrn.MapString(subTokenStr, false, false, true); matches == nil || len(matches) == 0 {
|
||||||
|
err = ErrInvalidDshGrpPtrn
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if vals = matches["start_pad"]; vals != nil && len(vals) == 1 {
|
||||||
|
startPad = vals[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
if vals = matches["start"]; vals != nil && len(vals) == 1 {
|
||||||
|
if u64, err = strconv.ParseUint(vals[0], 10, 64); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
st.start = uint(u64)
|
||||||
|
}
|
||||||
|
|
||||||
|
if vals = matches["end_pad"]; vals != nil && len(vals) == 1 {
|
||||||
|
endPad = vals[0]
|
||||||
|
}
|
||||||
|
if vals = matches["end"]; vals != nil && len(vals) == 1 {
|
||||||
|
if u64, err = strconv.ParseUint(vals[0], 10, 64); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
st.end = uint(u64)
|
||||||
|
}
|
||||||
|
|
||||||
|
if startPad != "" && endPad != "" {
|
||||||
|
// We set the pad to the largest.
|
||||||
|
if len(startPad) > len(endPad) {
|
||||||
|
st.pad = startPad
|
||||||
|
} else {
|
||||||
|
st.pad = endPad
|
||||||
|
}
|
||||||
|
} else if startPad != "" {
|
||||||
|
st.pad = startPad
|
||||||
|
} else if endPad != "" {
|
||||||
|
st.pad = endPad
|
||||||
|
}
|
||||||
|
|
||||||
|
subtoken = st
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
getDshGrpIncludes parses fpath for `#include ...` directives. It skips any entries in which
|
||||||
|
`len(paths.SegmentSys(p) == []string{p}`, as these are inherently included by the dir read.
|
||||||
|
|
||||||
|
It is assumed that fpath is a cleaned, absolute filepath.
|
||||||
|
*/
|
||||||
|
func getDshGrpIncludes(fpath string) (includes []string, err error) {
|
||||||
|
|
||||||
|
var f *os.File
|
||||||
|
var line string
|
||||||
|
var exists bool
|
||||||
|
var inclpath string
|
||||||
|
var subIncl []string
|
||||||
|
var segs []string
|
||||||
|
var scanner *bufio.Scanner
|
||||||
|
var matches map[string][]string
|
||||||
|
|
||||||
|
if f, err = os.Open(fpath); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
scanner = bufio.NewScanner(f)
|
||||||
|
for scanner.Scan() {
|
||||||
|
line = strings.TrimSpace(scanner.Text())
|
||||||
|
if line == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !dshGrpInclPtrn.MatchString(line) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
matches = dshGrpInclPtrn.MapString(line, false, false, true)
|
||||||
|
if matches == nil {
|
||||||
|
err = ErrInvalidDshGrpSyntax
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if matches["incl"] == nil || len(matches["incl"]) == 0 {
|
||||||
|
err = ErrInvalidDshGrpSyntax
|
||||||
|
return
|
||||||
|
}
|
||||||
|
inclpath = matches["incl"][0]
|
||||||
|
segs = paths.SegmentSys(inclpath, false, false)
|
||||||
|
if segs == nil || len(segs) == 0 || (len(segs) == 1 && segs[0] == inclpath) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if exists, err = paths.RealPathExists(&inclpath); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !exists {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
includes = append(includes, inclpath)
|
||||||
|
if subIncl, err = getDshGrpIncludes(inclpath); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if subIncl != nil && len(subIncl) > 0 {
|
||||||
|
includes = append(includes, subIncl...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
59
pdsh/dshgroup/funcs_dshgrp_test.go
Normal file
59
pdsh/dshgroup/funcs_dshgrp_test.go
Normal file
@ -0,0 +1,59 @@
|
|||||||
|
package dshgroup

import (
	"testing"
)

// TestParseDshPtrn parses a compound hostlist pattern and compares the expanded
// host list against a precomputed vector.
//
// NOTE(review): the pattern below contains a literal "baz" segment but none of
// the expected target strings do -- confirm which side is correct once
// DshGrpGenerator.Hosts is implemented.
func TestParseDshPtrn(t *testing.T) {

	var err error
	var idx int
	var s string
	var generator *DshGrpGenerator
	var hostList []string
	var tgtList []string = []string{
		"0foo1bar46004quux", "0foo1bar46005quux", "0foo1bar46006quux", "0foo1bar46007quux", "0foo1bar46008quux", "0foo1bar46009quux",
		"0foo1bar4615quux", "0foo1bar47004quux", "0foo1bar47005quux", "0foo1bar47006quux", "0foo1bar47007quux", "0foo1bar47008quux",
		"0foo1bar47009quux", "0foo1bar4715quux", "0foo2bar46004quux", "0foo2bar46005quux", "0foo2bar46006quux", "0foo2bar46007quux",
		"0foo2bar46008quux", "0foo2bar46009quux", "0foo2bar4615quux", "0foo2bar47004quux", "0foo2bar47005quux", "0foo2bar47006quux",
		"0foo2bar47007quux", "0foo2bar47008quux", "0foo2bar47009quux", "0foo2bar4715quux", "0foo3bar46004quux", "0foo3bar46005quux",
		"0foo3bar46006quux", "0foo3bar46007quux", "0foo3bar46008quux", "0foo3bar46009quux", "0foo3bar4615quux", "0foo3bar47004quux",
		"0foo3bar47005quux", "0foo3bar47006quux", "0foo3bar47007quux", "0foo3bar47008quux", "0foo3bar47009quux", "0foo3bar4715quux",
		"1foo1bar46004quux", "1foo1bar46005quux", "1foo1bar46006quux", "1foo1bar46007quux", "1foo1bar46008quux", "1foo1bar46009quux",
		"1foo1bar4615quux", "1foo1bar47004quux", "1foo1bar47005quux", "1foo1bar47006quux", "1foo1bar47007quux", "1foo1bar47008quux",
		"1foo1bar47009quux", "1foo1bar4715quux", "1foo2bar46004quux", "1foo2bar46005quux", "1foo2bar46006quux", "1foo2bar46007quux",
		"1foo2bar46008quux", "1foo2bar46009quux", "1foo2bar4615quux", "1foo2bar47004quux", "1foo2bar47005quux", "1foo2bar47006quux",
		"1foo2bar47007quux", "1foo2bar47008quux", "1foo2bar47009quux", "1foo2bar4715quux", "1foo3bar46004quux", "1foo3bar46005quux",
		"1foo3bar46006quux", "1foo3bar46007quux", "1foo3bar46008quux", "1foo3bar46009quux", "1foo3bar4615quux", "1foo3bar47004quux",
		"1foo3bar47005quux", "1foo3bar47006quux", "1foo3bar47007quux", "1foo3bar47008quux", "1foo3bar47009quux", "1foo3bar4715quux",
		"2foo1bar46004quux", "2foo1bar46005quux", "2foo1bar46006quux", "2foo1bar46007quux", "2foo1bar46008quux", "2foo1bar46009quux",
		"2foo1bar4615quux", "2foo1bar47004quux", "2foo1bar47005quux", "2foo1bar47006quux", "2foo1bar47007quux", "2foo1bar47008quux",
		"2foo1bar47009quux", "2foo1bar4715quux", "2foo2bar46004quux", "2foo2bar46005quux", "2foo2bar46006quux", "2foo2bar46007quux",
		"2foo2bar46008quux", "2foo2bar46009quux", "2foo2bar4615quux", "2foo2bar47004quux", "2foo2bar47005quux", "2foo2bar47006quux",
		"2foo2bar47007quux", "2foo2bar47008quux", "2foo2bar47009quux", "2foo2bar4715quux", "2foo3bar46004quux", "2foo3bar46005quux",
		"2foo3bar46006quux", "2foo3bar46007quux", "2foo3bar46008quux", "2foo3bar46009quux", "2foo3bar4615quux", "2foo3bar47004quux",
		"2foo3bar47005quux", "2foo3bar47006quux", "2foo3bar47007quux", "2foo3bar47008quux", "2foo3bar47009quux", "2foo3bar4715quux",
	}

	if generator, err = ParseDshPtrn("[0-2]foo[1-3]bar[4][6-7]baz[004-009,15]quux"); err != nil {
		t.Fatal(err)
	}

	hostList = generator.Hosts()
	t.Log(hostList)

	if len(hostList) != len(tgtList) {
		t.Fatalf("Generated list length (%d) does not match target (%d)", len(hostList), len(tgtList))
	}

	for idx, s = range hostList {
		// BUGFIX: was log.Fatalf, which calls os.Exit(1) and bypasses the testing
		// framework entirely (no cleanup, no failure reporting); use t.Fatalf.
		if s != tgtList[idx] {
			t.Fatalf("Test vector %d ('%s') does not match generated value '%s'", idx+1, tgtList[idx], s)
		}
	}
}
|
36
pdsh/dshgroup/funcs_dshgrpgenerator.go
Normal file
36
pdsh/dshgroup/funcs_dshgrpgenerator.go
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
package dshgroup

// Generate is intended to return an iterator-style yield function over the generated
// hosts. Not yet implemented; currently returns a nil function.
func (d *DshGrpGenerator) Generate() (yieldFunc func(yield func(host string) (done bool))) {

	// TODO

	return
}

// Hosts is intended to return the full expanded host list for the pattern.
// Not yet implemented; currently returns a nil slice.
func (d *DshGrpGenerator) Hosts() (hostList []string) {

	// TODO

	return
}

// Host is intended to return the current host in the iteration.
// Not yet implemented; currently returns an empty string.
func (d *DshGrpGenerator) Host() (host string) {

	// TODO

	return
}

// Next is intended to advance the iteration; done presumably reports exhaustion
// (TODO confirm once implemented). Not yet implemented; currently returns false.
func (d *DshGrpGenerator) Next() (done bool) {

	// TODO

	return
}

// Reset is intended to rewind the generator to the beginning of its host sequence.
// Not yet implemented; currently a no-op.
func (d *DshGrpGenerator) Reset() {

	// TODO

	return
}
|
16
pdsh/dshgroup/funcs_ptrnparseerr.go
Normal file
16
pdsh/dshgroup/funcs_ptrnparseerr.go
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
package dshgroup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Error conforms a PtrnParseErr to error interface.
|
||||||
|
func (p *PtrnParseErr) Error() (errStr string) {
|
||||||
|
|
||||||
|
errStr = fmt.Sprintf(
|
||||||
|
"Parse error in pattern '%s', position %d rune '%s' (%#x) (in token: %v): %v",
|
||||||
|
p.ptrn, p.pos, string(p.r), p.r, p.inToken, p.err,
|
||||||
|
)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
90
pdsh/dshgroup/types.go
Normal file
90
pdsh/dshgroup/types.go
Normal file
@ -0,0 +1,90 @@
|
|||||||
|
package dshgroup

// TODO: This... doesn't really have much usefulness, does it?
/*
type (
	HostLister interface {
		// Hosts returns ALL hosts (where applicable) that are considered/generated for a Lister.
		Hosts() (hosts []string, err error)
	}
)
*/

type (
	/*
		DshGroupLister behaves like the host list generator
		for pdsh(1)'s "dshgroup module options" (the `misc/dshgroup`
		module for pdsh).
	*/
	DshGroupLister struct {
		/*
			NoEnv, if true, will *not* use DSHGROUP_PATH (force-defaulting to /etc/dsh/group/,
			but see NoDefault).
		*/
		NoEnv bool
		/*
			NoDefault, if true, will *not* add the default path `/etc/dsh/group/`
			to the search paths.

			If NoDefault is false, this path is only added if DSHGROUP_PATH is not defined
			(or, if it IS defined, if NoEnv is true).
		*/
		NoDefault bool
		// NoHome, if true, will *not* add the `~/.dsh/group/` path to the search paths.
		NoHome bool
		/*
			ForceLegacy, if true, will disable the PDSH `#include <PATH|GROUP>` modification --
			treating the source as a traditional DSH group file instead (e.g. `#include ...`
			is treated as just a comment).
		*/
		ForceLegacy bool
		// StrictWhitespace follows the same behavior as PDSH regarding no whitespace between patterns.
		StrictWhitespace bool
	}
)

type (
	// DshGrpGenerator generates a list of hosts according to the pdsh "dshgroup" module.
	DshGrpGenerator struct {
		/*
			tokens are interleaved with tokenized and indexed *after*;
			in other words, str = <substr0><token0><substr1><token1>...
		*/
		tokens []dshGrpToken
		// tokenized holds the split original text with tokens removed and split where the tokens occur.
		tokenized []string
		// text holds the original pattern.
		text string
	}
	dshGrpToken struct {
		/*
			token contains the original range specifier.
			Tokens may be e.g.:

			* 3: str3
			* 3-5: str3, str4, str5
			* 3,5: str3, str5
		*/
		token string
		// subtokens hold a split of the individual range specifiers.
		subtokens []dshGrpSubtoken
	}
	dshGrpSubtoken struct {
		// start indicates either the single value or the start of the range.
		start uint
		// end, if 0 or less than start, indicates a single-value range.
		end uint
		// pad, if non-empty, is a string to add to the beginning of each of the generated substrings for this subtoken.
		pad string
	}
)

type (
	// PtrnParseErr describes where and why parsing a hostlist pattern failed.
	PtrnParseErr struct {
		// pos is the rune index within ptrn at which parsing failed.
		pos uint
		// ptrn is the original pattern being parsed.
		ptrn string
		// r is the rune that triggered the failure.
		r rune
		// err is the underlying sentinel or parse error.
		err error
		// inToken reports whether the failure occurred inside a `[...]` range token.
		inToken bool
	}
)
|
11
pdsh/genders/docs.go
Normal file
11
pdsh/genders/docs.go
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
/*
|
||||||
|
Package genders implements the [misc/genders] PDSH module. ([source])
|
||||||
|
|
||||||
|
# TODO/WIP/Not Yet Implemented
|
||||||
|
|
||||||
|
This package is not yet complete.
|
||||||
|
|
||||||
|
[misc/genders]: https://www.mankier.com/1/pdsh#genders_module_options
|
||||||
|
[source]: https://github.com/chaos/pdsh/blob/master/src/modules/genders.c
|
||||||
|
*/
|
||||||
|
package genders
|
11
pdsh/machines/docs.go
Normal file
11
pdsh/machines/docs.go
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
/*
|
||||||
|
Package machines implements the [misc/machines] PDSH module. ([source])
|
||||||
|
|
||||||
|
# TODO/WIP/Not Yet Implemented
|
||||||
|
|
||||||
|
This package is not yet complete.
|
||||||
|
|
||||||
|
[misc/machines]: https://www.mankier.com/1/pdsh#machines_module_options
|
||||||
|
[source]: https://github.com/chaos/pdsh/blob/master/src/modules/machines.c
|
||||||
|
*/
|
||||||
|
package machines
|
11
pdsh/netgroup/docs.go
Normal file
11
pdsh/netgroup/docs.go
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
/*
|
||||||
|
Package netgroup implements the [misc/netgroup] PDSH module. ([source])
|
||||||
|
|
||||||
|
# TODO/WIP/Not Yet Implemented
|
||||||
|
|
||||||
|
This package is not yet complete.
|
||||||
|
|
||||||
|
[misc/netgroup]: https://www.mankier.com/1/pdsh#netgroup_module_options
|
||||||
|
[source]: https://github.com/chaos/pdsh/blob/master/src/modules/netgroup.c
|
||||||
|
*/
|
||||||
|
package netgroup
|
11
pdsh/nodeupdown/docs.go
Normal file
11
pdsh/nodeupdown/docs.go
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
/*
|
||||||
|
Package nodeupdown implements the [misc/nodeupdown] PDSH module. ([source])
|
||||||
|
|
||||||
|
# TODO/WIP/Not Yet Implemented
|
||||||
|
|
||||||
|
This package is not yet complete.
|
||||||
|
|
||||||
|
[misc/nodeupdown]: https://www.mankier.com/1/pdsh#nodeupdown_module_options
|
||||||
|
[source]: https://github.com/chaos/pdsh/blob/master/src/modules/nodeupdown.c
|
||||||
|
*/
|
||||||
|
package nodeupdown
|
11
pdsh/slurm/docs.go
Normal file
11
pdsh/slurm/docs.go
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
/*
|
||||||
|
Package slurm implements the [misc/slurm] PDSH module. ([source])
|
||||||
|
|
||||||
|
# TODO/WIP/Not Yet Implemented
|
||||||
|
|
||||||
|
This package is not yet complete.
|
||||||
|
|
||||||
|
[misc/slurm]: https://www.mankier.com/1/pdsh#slurm_module_options
|
||||||
|
[source]: https://github.com/chaos/pdsh/blob/master/src/modules/slurm.c
|
||||||
|
*/
|
||||||
|
package slurm
|
11
pdsh/torque/docs.go
Normal file
11
pdsh/torque/docs.go
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
/*
|
||||||
|
Package torque implements the [misc/torque] PDSH module. ([source])
|
||||||
|
|
||||||
|
# TODO/WIP/Not Yet Implemented
|
||||||
|
|
||||||
|
This package is not yet complete.
|
||||||
|
|
||||||
|
[misc/torque]: https://www.mankier.com/1/pdsh#torque_module_options
|
||||||
|
[source]: https://github.com/chaos/pdsh/blob/master/src/modules/torque.c
|
||||||
|
*/
|
||||||
|
package torque
|
60
pdsh/types.go
Normal file
60
pdsh/types.go
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
package pdsh
|
||||||
|
|
||||||
|
type (
	/*
		Generator is one of the PDSH host generators/iterators offered by this module.

		Note that these generators/iterators are *stateful*, which means they shouldn't
		(probably; I'm not your dad) be used concurrently (unless you want some hard-to-debug results)
		and all methods advance the generator - so you probably don't want to call both Generate() and
		Next()/Host() on the same instance, for example.
	*/
	Generator interface {
		/*
			Generate provides a Go-native iterator (also called a "RangeFunc" or "range over function type")
			as found in Go 1.23 onwards.

			See the associated blog entry for details: https://go.dev/blog/range-functions

			Essentially it allows for e.g.:

				for host := range (Generator).Generate() {
					// ...
				}

			which is the "new standard" approach for iteration.
		*/
		Generate() (yieldFunc func(yield func(host string) (done bool)))
		/*
			Reset is used to reset a Generator, allowing one to "restart" the generation at the beginning.

			Generators in this module are generally single-use, but can be reset/reused with this method.
		*/
		Reset()
		/*
			Hosts returns a complete generated hostlist at once if you'd rather not iterate.

			Hosts() *does* perform an iteration in runtime, so the recommendation against concurrency
			stands, but it calls Reset() when done generating to allow other methods of a Generator to be used.
		*/
		Hosts() (hostList []string)
		/*
			Next and Host behave like more "traditional" iterators, e.g. like (database/sql).Rows.Next().

			Next advances the internal state to the next host, and Host() returns it.
		*/
		Next() (done bool)
		/*
			Host returns the current host string (or "" if done).

			Be sure to e.g.:

				for (Generator).Next() {
					host := (Generator).Host()
				}

			otherwise the Host return value will not change.
		*/
		Host() (host string)
	}
)
|
14
pdsh/wcoll/docs.go
Normal file
14
pdsh/wcoll/docs.go
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
/*
|
||||||
|
Package wcoll implements the "default" [WCOLL] method for PDSH. ([source])
|
||||||
|
|
||||||
|
Be sure to read the [HOSTLIST EXPRESSIONS] section in the MAN page.
|
||||||
|
|
||||||
|
# TODO/WIP/Not Yet Implemented
|
||||||
|
|
||||||
|
This package is not yet complete.
|
||||||
|
|
||||||
|
[WCOLL]: https://www.mankier.com/1/pdsh#Environment_Variables
|
||||||
|
[source]: https://github.com/chaos/pdsh/blob/master/src/pdsh/wcoll.c
|
||||||
|
[HOSTLIST EXPRESSIONS]: https://www.mankier.com/1/pdsh#Hostlist_Expressions
|
||||||
|
*/
|
||||||
|
package wcoll
|
53
types_linux.go
Normal file
53
types_linux.go
Normal file
@ -0,0 +1,53 @@
|
|||||||
|
package sysutils
|
||||||
|
|
||||||
|
import (
|
||||||
|
`golang.org/x/sys/unix`
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
	IDState collects information about the current running process.
	It should only be used as returned from GetIDState().
	Its methods WILL return false information if any of these values are altered.

	FSUID/FSGID are not supported.
*/
type IDState struct {
	// RUID: Real UID
	RUID int
	// EUID: Effective UID
	EUID int
	// SUID: Saved Set UID
	SUID int
	// RGID: Real GID
	RGID int
	// EGID: Effective GID
	EGID int
	// SGID: Saved Set GID
	SGID int
	// SudoEnvUser is true if SUDO_USER or SUDO_UID is set.
	SudoEnvUser bool
	// SudoEnvGroup is true if SUDO_GID is set.
	SudoEnvGroup bool
	// SudoEnvCmd is true if SUDO_COMMAND is set.
	SudoEnvCmd bool
	// SudoEnvHome is true if SUDO_HOME is set.
	SudoEnvHome bool
	// SudoEnvVars is true if any of the "well-known" sudo environment variables are set.
	SudoEnvVars bool
	// PPIDUidMatch is true if the parent PID UID matches the current process UID (mismatch usually indicates sudo invocation).
	PPIDUidMatch bool
	// PPIDGidMatch is true if the parent PID GID matches the current process GID (mismatch usually indicates sudo invocation).
	PPIDGidMatch bool
	// uidsChecked is true if the RUID, EUID, and SUID have been populated. (They will be 0 if unset OR if root.)
	uidsChecked bool
	// gidsChecked is true if the RGID, EGID, and SGID have been populated. (They will be 0 if unset OR if root.)
	gidsChecked bool
	// sudoChecked is true if the SudoEnvVars field has been populated.
	sudoChecked bool
	// ppidUidChecked is true if the PPIDUidMatch field has been populated.
	ppidUidChecked bool
	// ppidGidChecked is true if the PPIDGidMatch field has been populated.
	ppidGidChecked bool
	// stat holds the stat information for the parent PID.
	stat *unix.Stat_t
}
|
Loading…
x
Reference in New Issue
Block a user