2 Commits

brent saner | 145c32268e | 2025-12-18 04:47:31 -05:00
v1.14.0
ADDED:
* iox package
* mapsx package
* netx/inetcksum package

brent saner | 6ddfcdb416 | 2025-11-30 16:53:56 -05:00
v1.13.0
ADDED:
* stringsx functions
22 changed files with 982 additions and 221 deletions

go.mod (10 changed lines)

@@ -1,16 +1,16 @@
module r00t2.io/goutils
-go 1.24.5
go 1.25
require (
-github.com/coreos/go-systemd/v22 v22.5.0
github.com/coreos/go-systemd/v22 v22.6.0
github.com/google/uuid v1.6.0
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
-golang.org/x/sys v0.34.0
golang.org/x/sys v0.39.0
-r00t2.io/sysutils v1.14.0
r00t2.io/sysutils v1.15.1
)
require (
github.com/djherbis/times v1.6.0 // indirect
-golang.org/x/sync v0.16.0 // indirect
golang.org/x/sync v0.19.0 // indirect
)

go.sum (17 changed lines)

@@ -1,16 +1,15 @@
-github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
-github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y=
-golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
-golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
-golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-r00t2.io/sysutils v1.14.0 h1:Lrio3uPi9CuUdg+sg3WkVV1CK/qcOpV9GdFCGFG1KJs=
r00t2.io/sysutils v1.15.0 h1:FSnREfbXDhBQEO7LMpnRQeKlPshozxk9XHw3YgWRgRg=
-r00t2.io/sysutils v1.14.0/go.mod h1:ZJ7gZxFVQ7QIokQ5fPZr7wl0XO5Iu+LqtE8j3ciRINw=
r00t2.io/sysutils v1.15.0/go.mod h1:28qB0074EIRQ8Sy/ybaA5jC3qA32iW2aYLkMCRhyAFM=

View File

@@ -1,4 +1,7 @@
/*
Package iox includes extensions to the stdlib `io` module.
Not everything in here is considered fully stabilized yet,
but it should be usable.
*/
package iox

View File

@@ -5,5 +5,13 @@ import (
)
var (
ErrBufTooSmall error = errors.New("buffer too small; buffer size must be > 0")
ErrChunkTooBig error = errors.New("chunk too big for method")
ErrChunkTooSmall error = errors.New("chunk too small for buffer")
ErrInvalidChunkSize error = errors.New("an invalid chunk size was passed")
ErrNilCtx error = errors.New("a nil context was passed")
ErrNilReader error = errors.New("a nil reader was passed")
ErrNilWriter error = errors.New("a nil writer was passed")
ErrShortRead error = errors.New("a read was cut short with no EOF")
ErrShortWrite error = errors.New("a write was cut short with no error")
)
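For reference, these are plain sentinel values, so callers can match them with errors.Is. A minimal sketch; the r00t2.io/goutils/iox import path, and CopyBufN rejecting a non-positive n with ErrBufTooSmall (as CopyCtxBufN does below), are assumptions here:

	package main

	import (
		"bytes"
		"errors"
		"fmt"
		"strings"

		"r00t2.io/goutils/iox"
	)

	func main() {
		var dst bytes.Buffer

		// A zero/negative buffer size is assumed to be rejected up front with ErrBufTooSmall.
		_, err := iox.CopyBufN(&dst, strings.NewReader("hello"), 0)
		if errors.Is(err, iox.ErrBufTooSmall) {
			fmt.Println("buffer size must be > 0")
		}
	}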

View File

@@ -1,20 +1,21 @@
package iox
import (
`context`
`io`
)
/*
-CopyBufN is a mix between io.CopyN and io.CopyBuffer.
CopyBufN is a mix between [io.CopyN] and [io.CopyBuffer].
-Despite what the docs may suggest, io.CopyN does NOT *read* n bytes from src AND write n bytes to dst.
Despite what the docs may suggest, [io.CopyN] does NOT *read* n bytes from src AND write n bytes to dst.
Instead, it always reads 32 KiB from src, and writes n bytes to dst.
-There are, of course, cases where this is deadfully undesired.
There are cases where this is dreadfully undesired.
-One can, of course, use io.CopyBuffer, but this is a bit annoying since you then have to provide a buffer yourself.
One can, of course, use [io.CopyBuffer], but this is a bit annoying since you then have to provide a buffer yourself.
-This convenience-wraps io.CopyBuffer to have a similar signature to io.CopyN but properly uses n for both reading and writing.
This convenience-wraps [io.CopyBuffer] to have a similar signature to [io.CopyN] but properly uses n for both reading and writing.
*/
func CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
@@ -32,10 +33,215 @@ func CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error)
return
}
-// CopyBufWith allows for specifying a buffer allocator function, otherwise acts as CopyBufN.
-func CopyBufWith(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {
-written, err = io.CopyBuffer(dst, src, bufFunc())
// CopyCtxBufN copies from `src` to `dst`, `n` bytes at a time, interruptible by `ctx`.
func CopyCtxBufN(ctx context.Context, dst io.Writer, src io.Reader, n int64) (written int64, err error) {
var nr int
var nw int
var end bool
var buf []byte
if ctx == nil {
err = ErrNilCtx
return
}
if n <= 0 {
err = ErrBufTooSmall
return
}
endCopy:
for {
select {
case <-ctx.Done():
err = ctx.Err()
return
default:
buf = make([]byte, n)
nr, err = src.Read(buf)
if err == io.EOF {
err = nil
end = true
} else if err != nil {
return
}
buf = buf[:nr]
if nw, err = dst.Write(buf); err != nil {
written += int64(nw)
return
}
written += int64(nw)
if len(buf) != nw {
err = io.ErrShortWrite
return
}
if end {
break endCopy
}
}
}
return
}
/*
CopyBufWith allows for specifying a buffer allocator function, otherwise acts as [CopyBufN].
bufFunc *MUST NOT* return a nil or len == 0 buffer. [ErrBufTooSmall] will be returned if it does.
This uses a fixed buffer size from a single call to `bufFunc`.
If you need something with dynamic buffer sizing according to some state, use [CopyBufWithDynamic] instead.
(Note that CopyBufWithDynamic is generally a little slower, but it should only be noticeable on very large amounts of data.)
*/
func CopyBufWith(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {
var buf []byte = bufFunc()
if buf == nil || len(buf) == 0 {
err = ErrBufTooSmall
return
}
written, err = io.CopyBuffer(dst, src, buf)
return
}
/*
CopyBufWithDynamic is like [CopyBufWith] except it will call bufFunc after each previous buffer is written.
That is to say (using a particularly contrived example):
import "time"
func dynBuf() (b []byte) {
var t time.Time = time.Now()
b = make([]byte, t.Second())
return
}
Then:
CopyBufWithDynamic(w, r, dynBuf)
will use a buffer sized to the seconds of the time it reads in/writes out the next buffer, whereas with [CopyBufWith]:
CopyBufWith(w, r, dynBuf)
would use a *fixed* buffer size of whatever the seconds was equal to at the time of the *first call* to dynBuf.
`src` MUST return an [io.EOF] when its end is reached, but (as per e.g. [io.CopyBuffer]) the io.EOF error will not
be returned from CopyBufWithDynamic. (Any/all other errors encountered will be returned, however, and copying will
immediately cease.)
*/
func CopyBufWithDynamic(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {
var nr int
var nw int
var end bool
var buf []byte
for {
buf = bufFunc()
if buf == nil || len(buf) == 0 {
err = ErrBufTooSmall
return
}
nr, err = src.Read(buf)
if err == io.EOF {
err = nil
end = true
} else if err != nil {
return
}
buf = buf[:nr]
if nw, err = dst.Write(buf); err != nil {
written += int64(nw)
return
}
written += int64(nw)
if len(buf) != nw {
err = ErrShortWrite
return
}
if end {
break
}
}
return
}
// NewChunker returns a [ChunkLocker] ready to use.
func NewChunker(chunkSize uint) (c *ChunkLocker, err error) {
c = &ChunkLocker{}
err = c.SetChunkLen(chunkSize)
return
}
// NewCtxIO returns a [CtxIO].
func NewCtxIO(ctx context.Context, r io.Reader, w io.Writer, chunkSize uint) (c *CtxIO, err error) {
if r == nil {
err = ErrNilReader
return
}
if w == nil {
err = ErrNilWriter
return
}
if chunkSize == 0 {
err = ErrInvalidChunkSize
return
}
if ctx == nil {
err = ErrNilCtx
return
}
c = &CtxIO{
r: r,
w: w,
l: ChunkLocker{
chunkLen: chunkSize,
},
ctx: ctx,
}
return
}
/*
NewXIO returns a nil [XIO].
A weird "feature" of Golang is that a nil XIO is perfectly fine to use;
it's completely stateless and only has pointer receivers that only work with passed-in
values, so `new(XIO)` is completely unnecessary (as is NewXIO itself).
In other words, this works fine:
var xc *iox.XIO
if n, err = xc.Copy(w, r); err != nil {
return
}
This function is just to maintain cleaner-looking code if you should so need it,
or want an XIO without declaring one:
if n, err = iox.NewXIO().Copy(w, r); err != nil {
return
}
*/
func NewXIO() (x *XIO) {
// No-op lel
return
}
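A short usage sketch of the new copy helpers above; the r00t2.io/goutils/iox import path is assumed from this module's go.mod:

	package main

	import (
		"bytes"
		"context"
		"fmt"
		"strings"
		"time"

		"r00t2.io/goutils/iox"
	)

	func main() {
		var dst bytes.Buffer

		// Interruptible copy: 4-byte chunks, abandoned if the context expires first.
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		n, err := iox.CopyCtxBufN(ctx, &dst, strings.NewReader("some payload"), 4)
		if err != nil {
			fmt.Println("copy failed:", err)
		}
		fmt.Printf("copied %d bytes: %q\n", n, dst.String())

		// Dynamic buffer sizing: double the buffer on every chunk.
		dst.Reset()
		size := 1
		grow := func() (b []byte) {
			b = make([]byte, size)
			size *= 2
			return
		}
		if n, err = iox.CopyBufWithDynamic(&dst, strings.NewReader("another payload"), grow); err != nil {
			fmt.Println("copy failed:", err)
		}
		fmt.Printf("copied %d bytes: %q\n", n, dst.String())
	}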

iox/funcs_chunklocker.go (new file, 28 lines)

@@ -0,0 +1,28 @@
package iox
// GetChunkLen returns the current chunk size/length in bytes.
func (c *ChunkLocker) GetChunkLen() (size uint) {
c.lock.RLock()
defer c.lock.RUnlock()
size = c.chunkLen
return
}
// SetChunkLen sets the current chunk size/length in bytes.
func (c *ChunkLocker) SetChunkLen(size uint) (err error) {
if size == 0 {
err = ErrInvalidChunkSize
return
}
c.lock.Lock()
defer c.lock.Unlock()
c.chunkLen = size
return
}

iox/funcs_ctxio.go (new file, 173 lines)

@@ -0,0 +1,173 @@
package iox
import (
`bytes`
`context`
`io`
`math`
)
func (c *CtxIO) Copy(dst io.Writer, src io.Reader) (written int64, err error) {
if c.l.chunkLen > math.MaxInt64 {
err = ErrChunkTooBig
return
}
return CopyCtxBufN(c.ctx, dst, src, int64(c.l.chunkLen))
}
func (c *CtxIO) CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
if n <= 0 {
err = ErrBufTooSmall
return
}
return CopyCtxBufN(c.ctx, dst, src, n)
}
func (c *CtxIO) GetChunkLen() (size uint) {
return c.l.GetChunkLen()
}
func (c *CtxIO) Read(p []byte) (n int, err error) {
var nr int64
if nr, err = c.ReadWithContext(c.ctx, p); err != nil {
if nr > math.MaxInt {
n = math.MaxInt
} else {
n = int(nr)
}
return
}
if nr > math.MaxInt {
n = math.MaxInt
} else {
n = int(nr)
}
return
}
func (c *CtxIO) ReadWithContext(ctx context.Context, p []byte) (n int64, err error) {
var nr int
var off int
var buf []byte
if p == nil || len(p) == 0 {
return
}
if c.buf.Len() == 0 {
err = io.EOF
return
}
if c.l.chunkLen > uint(len(p)) {
// Would normally be a single chunk, so one-shot it.
nr, err = c.buf.Read(p)
n = int64(nr)
return
}
// Chunk over it.
endRead:
for {
select {
case <-ctx.Done():
err = ctx.Err()
return
default:
/*
off(set) is the index of the *next position* to write to.
Therefore the last offset == len(p),
therefore:
* if off == len(p), "done" (return no error, do *not* read from buf)
* if off + c.l.chunkLen > len(p), buf should be len(p) - off instead
*/
if off == len(p) {
break endRead
}
if uint(off)+c.l.chunkLen > uint(len(p)) {
buf = make([]byte, len(p)-off)
} else {
buf = make([]byte, c.l.chunkLen)
}
nr, err = c.buf.Read(buf)
n += int64(nr)
if nr > 0 {
// Copy at the current offset *before* advancing it.
copy(p[off:], buf[:nr])
off += nr
}
if err == io.EOF {
break endRead
} else if err != nil {
return
}
}
}
return
}
func (c *CtxIO) SetChunkLen(size uint) (err error) {
return c.l.SetChunkLen(size)
}
func (c *CtxIO) SetContext(ctx context.Context) (err error) {
if ctx == nil {
err = ErrNilCtx
return
}
c.ctx = ctx
return
}
func (c *CtxIO) Write(p []byte) (n int, err error) {
var nw int64
if c.l.chunkLen > math.MaxInt64 {
err = ErrChunkTooBig
return
}
if nw, err = c.WriteNWithContext(c.ctx, p, int64(c.l.chunkLen)); err != nil {
if nw > math.MaxInt {
n = math.MaxInt
} else {
n = int(nw)
}
return
}
if nw > math.MaxInt {
n = math.MaxInt
} else {
n = int(nw)
}
return
}
func (c *CtxIO) WriteNWithContext(ctx context.Context, p []byte, n int64) (written int64, err error) {
return CopyCtxBufN(ctx, &c.buf, bytes.NewReader(p), n)
}
func (c *CtxIO) WriteRune(r rune) (n int, err error) {
// We don't even bother listening for the ctx.Done because it's a single rune.
n, err = c.buf.WriteRune(r)
return
}
func (c *CtxIO) WriteWithContext(ctx context.Context, p []byte) (n int64, err error) {
if c.l.chunkLen > math.MaxInt64 {
err = ErrChunkTooBig
return
}
return CopyCtxBufN(ctx, &c.buf, bytes.NewReader(p), int64(c.l.chunkLen))
}
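A rough usage sketch of CtxIO as wired above (import path assumed): Write buffers into the internal bytes.Buffer in chunks, and Read drains it back out, both honoring the stored context.

	package main

	import (
		"context"
		"fmt"
		"io"
		"os"
		"strings"

		"r00t2.io/goutils/iox"
	)

	func main() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		// NewCtxIO requires a non-nil context, reader, writer, and a nonzero chunk size.
		c, err := iox.NewCtxIO(ctx, strings.NewReader(""), os.Stdout, 8)
		if err != nil {
			panic(err)
		}

		// Write buffers the payload internally, 8 bytes at a time...
		if _, err = c.Write([]byte("hello from a CtxIO")); err != nil {
			panic(err)
		}

		// ...and Read drains it back out of the internal buffer.
		out := make([]byte, 64)
		n, err := c.Read(out)
		if err != nil && err != io.EOF {
			panic(err)
		}
		fmt.Printf("read back %q\n", out[:n])
	}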

iox/funcs_xio.go (new file, 40 lines)

@@ -0,0 +1,40 @@
package iox
import (
`io`
)
// Copy copies [io.Reader] `src` to [io.Writer] `dst`. It implements [Copier].
func (x *XIO) Copy(dst io.Writer, src io.Reader) (written int64, err error) {
return io.Copy(dst, src)
}
// CopyBuffer copies [io.Reader] `src` to [io.Writer] `dst` using buffer `buf`. It implements [CopyBufferer].
func (x *XIO) CopyBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) {
return io.CopyBuffer(dst, src, buf)
}
// CopyBufWith copies [io.Reader] `src` to [io.Writer] `dst` using buffer returner `bufFunc`. It implements [SizedCopyBufferInvoker].
func (x *XIO) CopyBufWith(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {
return CopyBufWith(dst, src, bufFunc)
}
// CopyBufWithDynamic copies [io.Reader] `src` to [io.Writer] `dst` using buffer returner `bufFunc` for each chunk. It implements [DynamicSizedCopyBufferInvoker].
func (x *XIO) CopyBufWithDynamic(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {
return CopyBufWithDynamic(dst, src, bufFunc)
}
/*
CopyBufN reads buffered bytes from [io.Reader] `src` and copies to [io.Writer] `dst`
using the synchronous buffer size `n`.
It implements [SizedCopyBufferer].
*/
func (x *XIO) CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
return CopyBufN(dst, src, n)
}
// CopyN copies from [io.Reader] `src` to [io.Writer] `w`, `n` bytes at a time. It implements [SizedCopier].
func (x *XIO) CopyN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
return io.CopyN(dst, src, n)
}
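A small sketch of the stateless XIO in use (import path assumed); per the NewXIO doc above, even a nil *XIO satisfies the package interfaces:

	package main

	import (
		"bytes"
		"fmt"
		"strings"

		"r00t2.io/goutils/iox"
	)

	func main() {
		// A nil *XIO is usable because the type holds no state; it still satisfies iox.Copier.
		var c iox.Copier = (*iox.XIO)(nil)

		var dst bytes.Buffer
		n, err := c.Copy(&dst, strings.NewReader("copied through a nil XIO"))
		if err != nil {
			panic(err)
		}
		fmt.Println(n, dst.String())
	}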

View File

@@ -1,8 +1,209 @@
package iox
import (
`bytes`
`context`
`io`
`sync`
)
type (
-// RuneWriter matches the behavior of *(bytes.Buffer).WriteRune and *(bufio.Writer).WriteRune
/*
RuneWriter matches the behavior of [bytes.Buffer.WriteRune] and [bufio.Writer.WriteRune].
(Note that this package does not have a "RuneReader"; see [io.RuneReader] instead.)
*/
RuneWriter interface {
WriteRune(r rune) (n int, err error)
}
// Copier matches the signature/behavior of [io.Copy]. Implemented by [XIO].
Copier interface {
Copy(dst io.Writer, src io.Reader) (written int64, err error)
}
// CopyBufferer matches the signature/behavior of [io.CopyBuffer]. Implemented by [XIO].
CopyBufferer interface {
CopyBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error)
}
// SizedCopier matches the signature/behavior of [io.CopyN]. Implemented by [XIO].
SizedCopier interface {
CopyN(dst io.Writer, src io.Reader, n int64) (written int64, err error)
}
// SizedCopyBufferer matches the signature/behavior of [CopyBufN]. Implemented by [XIO].
SizedCopyBufferer interface {
CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error)
}
// SizedCopyBufferInvoker matches the signature/behavior of [CopyBufWith]. Implemented by [XIO].
SizedCopyBufferInvoker interface {
CopyBufWith(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error)
}
// DynamicSizedCopyBufferInvoker matches the signature/behavior of [CopyBufWithDynamic]. Implemented by [XIO].
DynamicSizedCopyBufferInvoker interface {
CopyBufWithDynamic(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error)
}
/*
Chunker is used by both [ContextReader] and [ContextWriter] to set/get the current chunk size.
Chunking is inherently required to be specified in order to interrupt reads/writes/copies with a [context.Context].
Implementations *must* use a [sync.RWMutex] to get (RLock) and set (Lock) the chunk size.
The chunk size *must not* be directly accessible to maintain concurrency safety assumptions.
*/
Chunker interface {
// GetChunkLen returns the current chunk size/length in bytes.
GetChunkLen() (size uint)
// SetChunkLen sets the current chunk size/length in bytes.
SetChunkLen(size uint) (err error)
}
/*
ChunkReader implements a chunking reader.
Third-party implementations *must* respect the chunk size locking (see [Chunker]).
The Read method should read in chunks of the internal chunk size.
*/
ChunkReader interface {
io.Reader
Chunker
}
/*
ChunkWriter implements a chunking writer.
Third-party implementations *must* respect the chunk size locking (see [Chunker]).
The Write method should write out in chunks of the internal chunk size.
*/
ChunkWriter interface {
io.Writer
Chunker
}
// ChunkReadWriter implements a chunking reader/writer.
ChunkReadWriter interface {
ChunkReader
ChunkWriter
}
/*
ContextSetter allows one to set an internal context.
A nil context should return an error.
*/
ContextSetter interface {
SetContext(context context.Context) (err error)
}
/*
ContextCopier is defined to allow for consumer-provided types. See [CtxIO] for a package-provided type.
The Copy method should use an internal context and chunk size
(and thus wrap [CopyCtxBufN] internally on an external call to Copy, etc.).
*/
ContextCopier interface {
Copier
Chunker
ContextSetter
SizedCopyBufferer
}
/*
ContextReader is primarily here to allow for consumer-provided types. See [CtxIO] for a package-provided type.
The Read method should use an internal context and chunk size.
The ReadWithContext method should use an internal chunk size.
*/
ContextReader interface {
ChunkReader
ContextSetter
ReadWithContext(ctx context.Context, p []byte) (n int64, err error)
}
/*
ContextWriter is primarily here to allow for consumer-provided types. See [CtxIO] for a package-provided type.
The Write method should use an internal context.
The WriteWithContext should use an internal chunk size.
*/
ContextWriter interface {
ChunkWriter
ContextSetter
WriteWithContext(ctx context.Context, p []byte) (n int64, err error)
WriteNWithContext(ctx context.Context, p []byte, n int64) (written int64, err error)
}
/*
ContextReadWriter is primarily here to allow for consumer-provided types.
See [CtxIO] for a package-provided type.
*/
ContextReadWriter interface {
ContextReader
ContextWriter
}
)
type (
// ChunkLocker implements [Chunker].
ChunkLocker struct {
lock sync.RWMutex
chunkLen uint
}
/*
CtxIO is a type used to demonstrate "stateful" I/O introduced by this package.
It implements:
* [Copier]
* [Chunker]
* [RuneWriter]
* [ChunkReader]
* [ChunkWriter]
* [ContextCopier]
* [ContextSetter]
* [ContextReader]
* [ContextWriter]
* [ChunkReadWriter]
* [ContextReadWriter]
* [SizedCopyBufferer]
Unlike [XIO], it must be non-nil (see [NewCtxIO]) since it maintains state
(though technically, one does not need to call [NewCtxIO] if they call
[CtxIO.SetChunkLen] and [CtxIO.SetContext] before any other methods).
[CtxIO.Write] and the other Write methods write to an internal buffer,
and [CtxIO.Read] and the other Read methods read back out of it.
*/
CtxIO struct {
r io.Reader
w io.Writer
l ChunkLocker
buf bytes.Buffer
ctx context.Context
}
/*
XIO is a type used to demonstrate "stateless" I/O introduced by this package.
It implements:
* [Copier]
* [CopyBufferer]
* [SizedCopier]
* [SizedCopyBufferer]
* [SizedCopyBufferInvoker]
* [DynamicSizedCopyBufferInvoker]
Unlike [CtxIO], the zero-value is ready to use since it holds no state
or configuration whatsoever.
A nil XIO is perfectly usable but if you want something more idiomatic,
see [NewXIO].
*/
XIO struct{}
)
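Since ChunkLocker already satisfies Chunker with the required RWMutex locking, a consumer type can meet e.g. ChunkWriter largely by embedding it. A sketch; countingWriter is a hypothetical example type, and the import path is assumed:

	package main

	import (
		"fmt"

		"r00t2.io/goutils/iox"
	)

	// countingWriter is a hypothetical ChunkWriter: it embeds iox.ChunkLocker
	// for the Get/SetChunkLen half of the contract and just counts bytes on Write.
	type countingWriter struct {
		iox.ChunkLocker
		total int
	}

	func (c *countingWriter) Write(p []byte) (n int, err error) {
		c.total += len(p)
		return len(p), nil
	}

	// Compile-time check that the embedded locker satisfies the interface.
	var _ iox.ChunkWriter = (*countingWriter)(nil)

	func main() {
		w := &countingWriter{}
		if err := w.SetChunkLen(16); err != nil {
			panic(err)
		}
		_, _ = w.Write(make([]byte, 40))
		fmt.Println(w.GetChunkLen(), w.total)
	}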

mapsx/doc.go (new file, 4 lines)

@@ -0,0 +1,4 @@
/*
Package mapsx includes functions that probably should have been in [maps] but aren't.
*/
package mapsx

mapsx/errs.go (new file, 9 lines)

@@ -0,0 +1,9 @@
package mapsx
import (
`errors`
)
var (
ErrNotFound = errors.New("key not found")
)

mapsx/funcs.go (new file, 43 lines)

@@ -0,0 +1,43 @@
package mapsx
/*
Get mimics Python's [dict.get()] behavior, returning value `v` if key `k`
is not found in map `m`.
See also [GetOk], [Must].
[dict.get()]: https://docs.python.org/3/library/stdtypes.html#dict.get
*/
func Get[Map ~map[K]V, K comparable, V any](m Map, k K, v V) (val V) {
val, _ = GetOk(m, k, v)
return
}
// GetOk is like [Get] but also explicitly indicates whether `k` was found or not. See also [Must].
func GetOk[Map ~map[K]V, K comparable, V any](m Map, k K, v V) (val V, found bool) {
if val, found = m[k]; !found {
val = v
}
return
}
/*
Must, unlike [Get] or [GetOk], requires that `k` be in map `m`.
A panic with error [ErrNotFound] will be raised if `k` is not present.
Otherwise the found value will be returned.
*/
func Must[Map ~map[K]V, K comparable, V any](m Map, k K) (val V) {
var ok bool
if val, ok = m[k]; !ok {
panic(ErrNotFound)
}
return
}
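A quick usage sketch of the three accessors (import path assumed):

	package main

	import (
		"fmt"

		"r00t2.io/goutils/mapsx"
	)

	func main() {
		ports := map[string]int{"https": 443}

		// Get returns the fallback when the key is missing, like Python's dict.get().
		fmt.Println(mapsx.Get(ports, "ssh", 22)) // 22

		// GetOk additionally reports whether the key was actually present.
		if v, found := mapsx.GetOk(ports, "https", 0); found {
			fmt.Println("https:", v)
		}

		// Must panics with mapsx.ErrNotFound if the key is absent.
		fmt.Println(mapsx.Must(ports, "https"))
	}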

View File

@@ -10,11 +10,20 @@ const (
)
const (
-// cksumMask is AND'd with a checksum to get the "carried ones".
/*
cksumMask is AND'd with a checksum to get the "carried ones"
(the lower 16 bits before folding carries).
*/
cksumMask uint32 = 0x0000ffff
-// cksumShift is used in the "carried-ones folding".
/*
cksumShift is used in the "carried-ones folding";
it's the number of bits to right-shift the carry-over.
*/
cksumShift uint32 = 0x00000010
-// padShift is used to "pad out" a checksum for odd-length buffers by left-shifting.
/*
padShift is used to "pad out" a checksum for odd-length buffers by left-shifting.
It positions the high byte of a 16-bit "word" (big-endian, as per ord below).
*/
padShift uint32 = 0x00000008
)
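For orientation, these constants are the usual RFC 1071 end-around-carry values. The fold loop itself is not part of this hunk; the sketch below is only a hypothetical illustration of how a mask of 0x0000ffff and a shift of 16 are conventionally applied, not this package's implementation:

	package main

	import "fmt"

	// fold16 illustrates how cksumMask (0x0000ffff) and cksumShift (16) are
	// conventionally used in the RFC 1071 checksum: carries above bit 15 are
	// folded back into the low 16 bits, and the final sum is complemented.
	func fold16(sum uint32) uint16 {
		for (sum >> 16) != 0 {
			sum = (sum & 0x0000ffff) + (sum >> 16)
		}
		return ^uint16(sum)
	}

	func main() {
		// 0x0001f204 folds to 0xf205; its one's complement is 0x0dfa.
		fmt.Printf("0x%04x\n", fold16(0x0001f204))
	}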

View File

@@ -25,6 +25,9 @@ safety and no data retention, which can be used as a:
* [io.StringWriter]
* [io.Writer]
If you don't need all these interfaces, a reasonable alternative may be
to use gVisor's [gvisor.dev/gvisor/pkg/tcpip/checksum] instead.
[RFC 1071]: https://datatracker.ietf.org/doc/html/rfc1071
[RFC 1141]: https://datatracker.ietf.org/doc/html/rfc1141
[RFC 1624]: https://datatracker.ietf.org/doc/html/rfc1624

View File

@@ -7,8 +7,9 @@ import (
// New returns a new initialized [InetChecksum]. It will never panic.
func New() (i *InetChecksum) {
-i = &InetChecksum{}
-_ = i.Aligned()
i = &InetChecksum{
aligned: true,
}
return
}
@@ -21,15 +22,14 @@ b may be nil or 0-length; this will not cause an error.
func NewFromBytes(b []byte) (i *InetChecksum, copied int, err error) {
var cksum InetChecksum
var cptr *InetChecksum = &cksum
cksum.aligned = true
if b != nil && len(b) > 0 {
-if copied, err = cksum.Write(b); err != nil {
if copied, err = cptr.Write(b); err != nil {
return
}
-_ = i.Aligned()
-} else {
-i = New()
-return
}
i = &cksum
@@ -48,7 +48,64 @@ func NewFromBuf(buf io.Reader) (i *InetChecksum, copied int64, err error) {
var cksum InetChecksum
-_ = i.Aligned()
cksum.aligned = true
if buf != nil {
if copied, err = io.Copy(&cksum, buf); err != nil {
return
}
}
i = &cksum
return
}
// NewSimple returns a new initialized [InetChecksumSimple]. It will never panic.
func NewSimple() (i *InetChecksumSimple) {
i = &InetChecksumSimple{
aligned: true,
}
return
}
/*
NewSimpleFromBytes returns a new [InetChecksumSimple] initialized with explicit bytes.
b may be nil or 0-length; this will not cause an error.
*/
func NewSimpleFromBytes(b []byte) (i *InetChecksumSimple, copied int, err error) {
var cksum InetChecksumSimple
var cptr *InetChecksumSimple = &cksum
cksum.aligned = true
if b != nil && len(b) > 0 {
if copied, err = cptr.Write(b); err != nil {
return
}
}
i = &cksum
return
}
/*
NewSimpleFromBuf returns an [InetChecksumSimple] from a specified [io.Reader].
buf may be nil. If it isn't, NewSimpleFromBuf will call [io.Copy] on buf.
Note that this may exhaust your passed buf or advance its current seek position/offset,
depending on its type.
*/
func NewSimpleFromBuf(buf io.Reader) (i *InetChecksumSimple, copied int64, err error) {
var cksum InetChecksumSimple
cksum.aligned = true
if buf != nil {
if copied, err = io.Copy(&cksum, buf); err != nil {
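A small sketch of the constructors above (import path assumed); it only uses the Write and Aligned methods shown in this changeset, with the folded sum itself left to [InetChecksum.Sum16]:

	package main

	import (
		"fmt"

		"r00t2.io/goutils/netx/inetcksum"
	)

	func main() {
		// Three bytes written: odd length, so the running state is not aligned.
		cksum, n, err := inetcksum.NewFromBytes([]byte{0x45, 0x00, 0x73})
		if err != nil {
			panic(err)
		}
		fmt.Println(n, cksum.Aligned()) // 3 false

		// One more byte brings the buffered length back to an even word boundary.
		if _, err = cksum.Write([]byte{0x01}); err != nil {
			panic(err)
		}
		fmt.Println(cksum.Aligned()) // true
	}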

View File

@@ -22,7 +22,7 @@ func (i *InetChecksum) Aligned() (aligned bool) {
defer i.alignLock.Unlock()
i.bufLock.RLock()
-aligned = i.buf.Len()&2 == 0
aligned = i.buf.Len()%2 == 0
i.bufLock.RUnlock()
i.aligned = aligned
@@ -113,7 +113,7 @@ func (i *InetChecksum) Reset() {
i.sumLock.Lock()
i.lastLock.Lock()
-i.aligned = false
i.aligned = true
i.alignLock.Unlock()
i.buf.Reset()
@@ -308,7 +308,7 @@ func (i *InetChecksum) WriteByte(c byte) (err error) {
}
if !i.disabledBuf {
-if err = i.WriteByte(c); err != nil {
if err = i.buf.WriteByte(c); err != nil {
i.sum = origSum
i.aligned = origAligned
i.last = origLast

View File

@@ -27,7 +27,7 @@ func (i *InetChecksumSimple) Reset() {
i.last = 0x00
i.sum = 0
-i.last = 0x00
i.aligned = true
}

View File

@@ -17,8 +17,8 @@ type (
If [InetChecksum.Aligned] returns false, the checksum result of an
[InetChecksum.Sum] or [InetChecksum.Sum16] (or any other operation
returning a sum) will INCLUDE THE PAD NULL BYTE (which is only
-applied *at the time of the Sum/Sum32 call) and is NOT applied to
-the persistent underlying storage.
applied *at the time of the Sum/Sum32 call* and is NOT applied to
the persistent underlying storage).
InetChecksum differs from [InetChecksumSimple] in that it:

View File

@@ -4,8 +4,3 @@ const (
// DefMaskStr is the string used as the default maskStr if left empty in [Redact].
DefMaskStr string = "***"
)
-const (
-// DefIndentStr is the string used as the default indent if left empty in [Indent].
-DefIndentStr string = "\t"
-)

View File

@@ -1,4 +1,17 @@
/*
Package stringsx aims to extend functionality of the stdlib [strings] module.
Note that if you need a way of mimicking Bash's shell quoting rules, [desertbit/shlex] or [buildkite/shellwords]
would be better options than [google/shlex], but this package does not attempt to reproduce
any of that functionality.
For line splitting, one should use [muesli/reflow/wordwrap].
Likewise for indentation, one should use [muesli/reflow/indent].
[desertbit/shlex]: https://pkg.go.dev/github.com/desertbit/go-shlex
[buildkite/shellwords]: https://pkg.go.dev/github.com/buildkite/shellwords
[google/shlex]: https://pkg.go.dev/github.com/google/shlex
[muesli/reflow/wordwrap]: https://pkg.go.dev/github.com/muesli/reflow/wordwrap
[muesli/reflow/indent]: https://pkg.go.dev/github.com/muesli/reflow/indent
*/
package stringsx

View File

@@ -1,96 +1,170 @@
package stringsx
import (
`fmt`
`strings`
`unicode`
)
/*
-Indent takes string s and indents it with string `indent` `level` times.
-If indent is an empty string, [DefIndentStr] will be used.
-If ws is true, lines consisting of only whitespace will be indented as well.
-(To then trim any extraneous trailing space, you may want to use [TrimSpaceRight]
-or [TrimLines].)
-If empty is true, lines with no content will be replaced with lines that purely
-consist of (indent * level) (otherwise they will be left as empty lines).
-This function can also be used to prefix lines with arbitrary strings as well.
-e.g:
-Indent("foo\nbar\nbaz\n", "# ", 1, false, false)
-would yield:
-# foo
-# bar
-# baz
-<empty line>
-thus allowing you to "comment out" multiple lines at once.
LenSplit formats string `s` to break at, at most, every `width` characters.
Any existing newlines (e.g. \r\n) will be removed during a string/
substring/line's length calculation. (e.g. `foobarbaz\n` and `foobarbaz\r\n` are
both considered to be lines of length 9, not 10 and 11 respectively).
This also means that any newlines (\n or \r\n) are inherently removed from
`out` (even if included in `wordWrap`; see below).
Note that if `s` is multiline (already contains newlines), they will be respected
as-is - that is, if a line ends with less than `width` chars and then has a newline,
it will be preserved as an empty element. That is to say:
"foo\nbar\n\n" → []string{"foo", "bar", ""}
"foo\n\nbar\n" → []string{"foo", "", "bar"}
This splitter is particularly simple. If you need wordwrapping, it should be done
with e.g. [github.com/muesli/reflow/wordwrap].
*/
-func Indent(s, indent string, level uint, ws, empty bool) (indented string) {
-var i string
-var nl string
-var endsNewline bool
-var sb strings.Builder
-var lineStripped string
-if indent == "" {
-indent = DefIndentStr
-}
-// This condition functionally won't do anything, so just return the input as-is.
-if level == 0 {
-indented = s
-return
-}
-i = strings.Repeat(indent, int(level))
-// This condition functionally won't do anything, so just return the input as-is.
-if s == "" {
-if empty {
-indented = i
-}
-return
-}
-for line := range strings.Lines(s) {
-lineStripped = strings.TrimSpace(line)
-nl = getNewLine(line)
-endsNewline = nl != ""
-// fmt.Printf("%#v => %#v\n", line, lineStripped)
-if lineStripped == "" {
-// fmt.Printf("WS/EMPTY LINE (%#v) (ws %v, empty %v): \n", s, ws, empty)
-if line != (lineStripped + nl) {
-// whitespace-only line
-if ws {
-sb.WriteString(i)
-}
-} else {
-// empty line
-if empty {
-sb.WriteString(i)
-}
-}
-sb.WriteString(line)
-continue
-}
-// non-empty/non-whitespace-only line.
-sb.WriteString(i + line)
-}
-// If it ends with a trailing newline and nothing after, strings.Lines() will skip the last (empty) line.
-if endsNewline && empty {
-nl = getNewLine(s)
-sb.WriteString(i)
-}
-indented = sb.String()
-return
-}
func LenSplit(s string, width uint) (out []string) {
var end int
var line string
var lineRunes []rune
if width == 0 {
out = []string{s}
return
}
for line = range strings.Lines(s) {
line = strings.TrimRight(line, "\n")
line = strings.TrimRight(line, "\r")
lineRunes = []rune(line)
if uint(len(lineRunes)) <= width {
out = append(out, line)
continue
}
for i := 0; i < len(lineRunes); i += int(width) {
end = i + int(width)
if end > len(lineRunes) {
end = len(lineRunes)
}
out = append(out, string(lineRunes[i:end]))
}
}
return
}
/*
LenSplitStr wraps [LenSplit] but recombines into a new string with newlines.
It's mostly just a convenience wrapper.
All arguments remain the same as in [LenSplit] with an additional one,
`winNewLine`, which if true will use \r\n as the newline instead of \n.
*/
func LenSplitStr(s string, width uint, winNewline bool) (out string) {
var outSl []string = LenSplit(s, width)
if winNewline {
out = strings.Join(outSl, "\r\n")
} else {
out = strings.Join(outSl, "\n")
}
return
}
/*
Pad pads each element in `s` to length `width` using `pad`.
If `pad` is empty, a single space (0x20) will be assumed.
Note that `width` operates on rune size, not byte size.
(In ASCII, they will be the same size.)
If a line in `s` is greater than or equal to `width`,
no padding will be performed.
If `leftPad` is true, padding will be applied to the "left" ("beginning")
of each element instead of the "right" ("end").
*/
func Pad(s []string, width uint, pad string, leftPad bool) (out []string) {
var idx int
var padIdx int
var runeIdx int
var padLen uint
var elem string
var unpadLen uint
var tmpPadLen int
var padRunes []rune
var tmpPad []rune
if width == 0 {
out = s
return
}
out = make([]string, len(s))
// Easy; supported directly in fmt.
if pad == "" {
for idx, elem = range s {
if leftPad {
out[idx] = fmt.Sprintf("%*s", width, elem)
} else {
out[idx] = fmt.Sprintf("%-*s", width, elem)
}
}
return
}
// This gets a little more tricky.
padRunes = []rune(pad)
padLen = uint(len(padRunes))
for idx, elem = range s {
// First we need to know the number of runes in elem.
unpadLen = uint(len([]rune(elem)))
// If it's more than/equal to width, as-is.
if unpadLen >= width {
out[idx] = elem
} else {
// Otherwise, we need to construct/calculate a pad.
if (width-unpadLen)%padLen == 0 {
// Also easy enough.
if leftPad {
out[idx] = fmt.Sprintf("%s%s", strings.Repeat(pad, int((width-unpadLen)/padLen)), elem)
} else {
out[idx] = fmt.Sprintf("%s%s", elem, strings.Repeat(pad, int((width-unpadLen)/padLen)))
}
} else {
// This is where it gets a little hairy.
tmpPadLen = int(width - unpadLen)
tmpPad = make([]rune, tmpPadLen)
padIdx = 0
for runeIdx = range tmpPadLen {
// Cycle through the pad runes until the shortfall is filled.
tmpPad[runeIdx] = padRunes[padIdx]
padIdx++
if uint(padIdx) >= padLen {
padIdx = 0
}
}
if leftPad {
out[idx] = fmt.Sprintf("%s%s", string(tmpPad), elem)
} else {
out[idx] = fmt.Sprintf("%s%s", elem, string(tmpPad))
}
}
}
}
return return
} }
@@ -118,6 +192,9 @@ As a safety precaution, if:
len(s) <= (leading + trailing)
then the entire string will be *masked* and no unmasking will be performed.
Note that this DOES NOT do a string *replace*; it provides a masked version of `s` itself.
Wrap Redact with [strings.ReplaceAll] if you want to replace a certain value with a masked one.
*/
func Redact(s, maskStr string, leading, trailing uint, newlines bool) (redacted string) {
@@ -218,7 +295,7 @@ func TrimLines(s string, left, right bool) (trimmed string) {
return
}
-// TrimSpaceLeft is like [strings.TrimSpace] but only removes leading whitespace from string s.
// TrimSpaceLeft is like [strings.TrimSpace] but only removes leading whitespace from string `s`.
func TrimSpaceLeft(s string) (trimmed string) {
trimmed = strings.TrimLeftFunc(s, unicode.IsSpace)
@@ -236,7 +313,7 @@ func TrimSpaceRight(s string) (trimmed string) {
return
}
-// getNewLine is too unpredictable to be used outside of this package so it isn't exported.
// getNewLine is too unpredictable/nuanced to be used as part of a public API promise so it isn't exported.
func getNewLine(s string) (nl string) {
if strings.HasSuffix(s, "\r\n") {
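A short usage sketch of the new and existing stringsx helpers (import path assumed):

	package main

	import (
		"fmt"

		"r00t2.io/goutils/stringsx"
	)

	func main() {
		// Break a long token into at-most-4-rune pieces.
		fmt.Println(stringsx.LenSplit("abcdefghij", 4)) // [abcd efgh ij]

		// Same thing, but rejoined with "\r\n" line endings.
		fmt.Println(stringsx.LenSplitStr("abcdefghij", 4, true))

		// Right-pad every element to 6 runes with dots.
		fmt.Println(stringsx.Pad([]string{"ab", "abcd"}, 6, ".", false))

		// Mask the middle of a secret, keeping 2 leading and 2 trailing runes.
		fmt.Println(stringsx.Redact("hunter2", "", 2, 2, false))
	}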

View File

@@ -37,113 +37,6 @@ type (
}
)
func TestIndent(t *testing.T) {
var out string
var tests []testIndentSet = []testIndentSet{
testIndentSet{
name: "standard, no trailing newline",
orig: "foo\nbar\nbaz",
indent: "",
lvl: 1,
ws: false,
empty: false,
tgt: "\tfoo\n\tbar\n\tbaz",
},
testIndentSet{
name: "standard, trailing newline",
orig: "foo\nbar\nbaz\n",
indent: "",
lvl: 1,
ws: false,
empty: false,
tgt: "\tfoo\n\tbar\n\tbaz\n",
},
testIndentSet{
name: "standard, trailing newline with empty",
orig: "foo\nbar\nbaz\n",
indent: "",
lvl: 1,
ws: false,
empty: true,
tgt: "\tfoo\n\tbar\n\tbaz\n\t",
},
testIndentSet{
name: "standard, trailing newline with ws",
orig: "foo\nbar\nbaz\n",
indent: "",
lvl: 1,
ws: true,
empty: false,
tgt: "\tfoo\n\tbar\n\tbaz\n",
},
testIndentSet{
name: "standard, trailing newline with ws and empty",
orig: "foo\nbar\nbaz\n",
indent: "",
lvl: 1,
ws: true,
empty: true,
tgt: "\tfoo\n\tbar\n\tbaz\n\t",
},
testIndentSet{
name: "standard, trailing ws newline with empty",
orig: "foo\nbar\nbaz\n ",
indent: "",
lvl: 1,
ws: false,
empty: true,
tgt: "\tfoo\n\tbar\n\tbaz\n ",
},
testIndentSet{
name: "standard, trailing ws newline with ws",
orig: "foo\nbar\nbaz\n ",
indent: "",
lvl: 1,
ws: true,
empty: false,
tgt: "\tfoo\n\tbar\n\tbaz\n\t ",
},
testIndentSet{
name: "standard, trailing ws newline with ws and empty",
orig: "foo\nbar\nbaz\n \n",
indent: "",
lvl: 1,
ws: true,
empty: true,
tgt: "\tfoo\n\tbar\n\tbaz\n\t \n\t",
},
testIndentSet{
name: "comment",
orig: "foo\nbar\nbaz",
indent: "# ",
lvl: 1,
ws: false,
empty: false,
tgt: "# foo\n# bar\n# baz",
},
}
for idx, ts := range tests {
out = Indent(ts.orig, ts.indent, ts.lvl, ts.ws, ts.empty)
if out == ts.tgt {
t.Logf("[%d] OK (%s): %#v: got %#v", idx, ts.name, ts.orig, out)
} else {
t.Errorf(
"[%d] FAIL (%s): %#v (len %d):\n"+
"\t\t\texpected (len %d): %#v\n"+
"\t\t\tgot (len %d): %#v\n"+
"\t\t%#v",
idx, ts.name, ts.orig, len(ts.orig),
len(ts.tgt), ts.tgt,
len(out), out,
ts,
)
}
}
}
func TestRedact(t *testing.T) {
var out string