From 145c32268ef04801bc029a9f730e14aaa19d9090 Mon Sep 17 00:00:00 2001 From: brent saner Date: Thu, 18 Dec 2025 04:47:31 -0500 Subject: [PATCH] v1.14.0 ADDED: * iox package * mapsx package * netx/inetcksum package --- go.mod | 10 +- go.sum | 17 +- iox/docs.go | 3 + iox/errs.go | 10 +- iox/funcs.go | 222 ++++++++++++++++++++- iox/funcs_chunklocker.go | 28 +++ iox/funcs_ctxio.go | 173 ++++++++++++++++ iox/funcs_xio.go | 40 ++++ iox/types.go | 203 ++++++++++++++++++- mapsx/doc.go | 4 + mapsx/errs.go | 9 + mapsx/funcs.go | 43 ++++ netx/inetcksum/consts.go | 15 +- netx/inetcksum/docs.go | 3 + netx/inetcksum/funcs.go | 73 ++++++- netx/inetcksum/funcs_inetchecksum.go | 6 +- netx/inetcksum/funcs_inetchecksumsimple.go | 2 +- netx/inetcksum/types.go | 4 +- stringsx/funcs_test.go | 107 ---------- 19 files changed, 824 insertions(+), 148 deletions(-) create mode 100644 iox/funcs_chunklocker.go create mode 100644 iox/funcs_ctxio.go create mode 100644 iox/funcs_xio.go create mode 100644 mapsx/doc.go create mode 100644 mapsx/errs.go create mode 100644 mapsx/funcs.go diff --git a/go.mod b/go.mod index 3061af7..7158843 100644 --- a/go.mod +++ b/go.mod @@ -1,16 +1,16 @@ module r00t2.io/goutils -go 1.24.5 +go 1.25 require ( - github.com/coreos/go-systemd/v22 v22.5.0 + github.com/coreos/go-systemd/v22 v22.6.0 github.com/google/uuid v1.6.0 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/sys v0.34.0 - r00t2.io/sysutils v1.14.0 + golang.org/x/sys v0.39.0 + r00t2.io/sysutils v1.15.1 ) require ( github.com/djherbis/times v1.6.0 // indirect - golang.org/x/sync v0.16.0 // indirect + golang.org/x/sync v0.19.0 // indirect ) diff --git a/go.sum b/go.sum index 2eccd9f..3f65379 100644 --- a/go.sum +++ b/go.sum @@ -1,16 +1,15 @@ -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= +github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= -golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -r00t2.io/sysutils v1.14.0 h1:Lrio3uPi9CuUdg+sg3WkVV1CK/qcOpV9GdFCGFG1KJs= -r00t2.io/sysutils v1.14.0/go.mod h1:ZJ7gZxFVQ7QIokQ5fPZr7wl0XO5Iu+LqtE8j3ciRINw= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= 
+golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +r00t2.io/sysutils v1.15.0 h1:FSnREfbXDhBQEO7LMpnRQeKlPshozxk9XHw3YgWRgRg= +r00t2.io/sysutils v1.15.0/go.mod h1:28qB0074EIRQ8Sy/ybaA5jC3qA32iW2aYLkMCRhyAFM= diff --git a/iox/docs.go b/iox/docs.go index a1c3bf1..9af1c81 100644 --- a/iox/docs.go +++ b/iox/docs.go @@ -1,4 +1,7 @@ /* Package iox includes extensions to the stdlib `io` module. + +Not everything in here is considered fully stabilized yet, +but it should be usable. */ package iox diff --git a/iox/errs.go b/iox/errs.go index 659357c..5939217 100644 --- a/iox/errs.go +++ b/iox/errs.go @@ -5,5 +5,13 @@ import ( ) var ( - ErrBufTooSmall error = errors.New("buffer too small; buffer size must be > 0") + ErrBufTooSmall error = errors.New("buffer too small; buffer size must be > 0") + ErrChunkTooBig error = errors.New("chunk too big for method") + ErrChunkTooSmall error = errors.New("chunk too small for buffer") + ErrInvalidChunkSize error = errors.New("an invalid chunk size was passed") + ErrNilCtx error = errors.New("a nil context was passed") + ErrNilReader error = errors.New("a nil reader was passed") + ErrNilWriter error = errors.New("a nil writer was passed") + ErrShortRead error = errors.New("a read was cut short with no EOF") + ErrShortWrite error = errors.New("a write was cut short with no error") ) diff --git a/iox/funcs.go b/iox/funcs.go index 9104c48..b171ab5 100644 --- a/iox/funcs.go +++ b/iox/funcs.go @@ -1,20 +1,21 @@ package iox import ( + `context` `io` ) /* -CopyBufN is a mix between io.CopyN and io.CopyBuffer. +CopyBufN is a mix between [io.CopyN] and [io.CopyBuffer]. -Despite what the docs may suggest, io.CopyN does NOT *read* n bytes from src AND write n bytes to dst. +Despite what the docs may suggest, [io.CopyN] does NOT *read* n bytes from src AND write n bytes to dst. Instead, it always reads 32 KiB from src, and writes n bytes to dst. -There are, of course, cases where this is deadfully undesired. +There are cases where this is dreadfully undesired. -One can, of course, use io.CopyBuffer, but this is a bit annoying since you then have to provide a buffer yourself. +One can, of course, use [io.CopyBuffer], but this is a bit annoying since you then have to provide a buffer yourself. -This convenience-wraps io.CopyBuffer to have a similar signature to io.CopyN but properly uses n for both reading and writing. +This convenience-wraps [io.CopyBuffer] to have a similar signature to [io.CopyN] but properly uses n for both reading and writing. */ func CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error) { @@ -32,10 +33,215 @@ func CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error) return } -// CopyBufWith allows for specifying a buffer allocator function, otherwise acts as CopyBufN. -func CopyBufWith(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) { +// CopyCtxBufN copies from `src` to `dst`, `n` bytes at a time, interruptible by `ctx`. 
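+//
+// A minimal usage sketch (the `src`, `dst`, and timeout below are illustrative placeholders, not part of this patch);
+// the copy stops early with ctx.Err() if the context is cancelled:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	written, err := iox.CopyCtxBufN(ctx, dst, src, 32*1024) // 32 KiB chunks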
+func CopyCtxBufN(ctx context.Context, dst io.Writer, src io.Reader, n int64) (written int64, err error) { - written, err = io.CopyBuffer(dst, src, bufFunc()) + var nr int + var nw int + var end bool + var buf []byte + + if ctx == nil { + err = ErrNilCtx + return + } + if n <= 0 { + err = ErrBufTooSmall + return + } + +endCopy: + for { + select { + case <-ctx.Done(): + err = ctx.Err() + return + default: + buf = make([]byte, n) + nr, err = src.Read(buf) + if err == io.EOF { + err = nil + end = true + } else if err != nil { + return + } + buf = buf[:nr] + + if nw, err = dst.Write(buf); err != nil { + written += int64(nw) + return + } + written += int64(nw) + if len(buf) != nw { + err = io.ErrShortWrite + return + } + if end { + break endCopy + } + } + } return } + +/* +CopyBufWith allows for specifying a buffer allocator function, otherwise acts as [CopyBufN]. + +bufFunc *MUST NOT* return a nil or len == 0 buffer. [ErrBufTooSmall] will be returned if it does. + +This uses a fixed buffer size from a single call to `bufFunc`. +If you need something with dynamic buffer sizing according to some state, use [CopyBufWithDynamic] instead. +(Note that CopyBufWithDynamic is generally a little slower, but it should only be noticeable on very large amounts of data.) +*/ +func CopyBufWith(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) { + + var buf []byte = bufFunc() + + if buf == nil || len(buf) == 0 { + err = ErrBufTooSmall + return + } + + written, err = io.CopyBuffer(dst, src, buf) + + return +} + +/* +CopyBufWithDynamic is like [CopyBufWith] except it will call bufFunc after each previous buffer is written. + +That is to say (using a particularly contrived example): + + import time + + func dynBuf() (b []byte) { + + var t time.Time = time.Now() + + b = make([]byte, t.Seconds()) + + return + } + +Then: + + CopyBufWithDynamic(w, r, dynBuf) + +will use a buffer sized to the seconds of the time it reads in/writes out the next buffer, whereas with [CopyBufWith]: + + CopyBufWith(w, r, dynBuf) + +would use a *fixed* buffer size of whatever the seconds was equal to at the time of the *first call* to dynBuf. + +`src` MUST return an [io.EOF] when its end is reached, but (as per e.g. [io.CopyBuffer]) the io.EOF error will not +be returned from CopyBufWithDynamic. (Any/all other errors encountered will be returned, however, and copying will +immediately cease.) +*/ +func CopyBufWithDynamic(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) { + + var nr int + var nw int + var end bool + var buf []byte + + for { + buf = bufFunc() + if buf == nil || len(buf) == 0 { + err = ErrBufTooSmall + return + } + nr, err = src.Read(buf) + if err == io.EOF { + err = nil + end = true + } else if err != nil { + return + } + buf = buf[:nr] + + if nw, err = dst.Write(buf); err != nil { + written += int64(nw) + return + } + written += int64(nw) + if len(buf) != nw { + err = ErrShortWrite + return + } + if end { + break + } + } + + return +} + +// NewChunker returns a [ChunkLocker] ready to use. +func NewChunker(chunkSize uint) (c *ChunkLocker, err error) { + + c = &ChunkLocker{} + err = c.SetChunkLen(chunkSize) + + return +} + +// NewCtxIO returns a [CtxIO]. 
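+//
+// A brief construction sketch (`ctx`, `src`, and `dst` are placeholder values, not from this patch):
+//
+//	c, err := iox.NewCtxIO(ctx, src, dst, 4096) // 4 KiB chunk size
+//	if err != nil {
+//		return
+//	}
+//	// The copy below is interruptible via ctx.
+//	written, err := c.Copy(dst, src)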
+func NewCtxIO(ctx context.Context, r io.Reader, w io.Writer, chunkSize uint) (c *CtxIO, err error) {
+
+	if r == nil {
+		err = ErrNilReader
+		return
+	}
+	if w == nil {
+		err = ErrNilWriter
+		return
+	}
+
+	if chunkSize == 0 {
+		err = ErrInvalidChunkSize
+		return
+	}
+
+	if ctx == nil {
+		err = ErrNilCtx
+		return
+	}
+
+	c = &CtxIO{
+		r: r,
+		w: w,
+		l: ChunkLocker{
+			chunkLen: chunkSize,
+		},
+		ctx: ctx,
+	}
+
+	return
+}
+
+/*
+	NewXIO returns a nil [XIO].
+
+	A quirk of Go is that a nil *XIO is perfectly fine to use;
+	it's completely stateless and only has pointer receivers that operate solely on
+	passed-in values, so `new(XIO)` is completely unnecessary (as is NewXIO itself).
+	In other words, this works fine:
+
+		var xc *iox.XIO
+
+		if n, err = xc.Copy(w, r); err != nil {
+			return
+		}
+
+	This function exists only to keep code looking cleaner if you should so need it,
+	or to get an XIO without declaring one:
+
+		if n, err = iox.NewXIO().Copy(w, r); err != nil {
+			return
+		}
+*/
+func NewXIO() (x *XIO) {
+	// Intentionally a no-op; a nil *XIO is fully usable.
+	return
+}
diff --git a/iox/funcs_chunklocker.go b/iox/funcs_chunklocker.go
new file mode 100644
index 0000000..03e37d1
--- /dev/null
+++ b/iox/funcs_chunklocker.go
@@ -0,0 +1,28 @@
+package iox
+
+// GetChunkLen returns the current chunk size/length in bytes.
+func (c *ChunkLocker) GetChunkLen() (size uint) {
+
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+
+	size = c.chunkLen
+
+	return
+}
+
+// SetChunkLen sets the current chunk size/length in bytes.
+func (c *ChunkLocker) SetChunkLen(size uint) (err error) {
+
+	if size == 0 {
+		err = ErrInvalidChunkSize
+		return
+	}
+
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	c.chunkLen = size
+
+	return
+}
diff --git a/iox/funcs_ctxio.go b/iox/funcs_ctxio.go
new file mode 100644
index 0000000..fc3d0d3
--- /dev/null
+++ b/iox/funcs_ctxio.go
@@ -0,0 +1,173 @@
+package iox
+
+import (
+	`bytes`
+	`context`
+	`io`
+	`math`
+)
+
+func (c *CtxIO) Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+	if c.l.chunkLen > math.MaxInt64 {
+		err = ErrChunkTooBig
+		return
+	}
+	return CopyCtxBufN(c.ctx, dst, src, int64(c.l.chunkLen))
+}
+
+func (c *CtxIO) CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
+	if n <= 0 {
+		err = ErrBufTooSmall
+		return
+	}
+	return CopyCtxBufN(c.ctx, dst, src, n)
+}
+
+func (c *CtxIO) GetChunkLen() (size uint) {
+	return c.l.GetChunkLen()
+}
+
+func (c *CtxIO) Read(p []byte) (n int, err error) {
+
+	var nr int64
+
+	if nr, err = c.ReadWithContext(c.ctx, p); err != nil {
+		if nr > math.MaxInt {
+			n = math.MaxInt
+		} else {
+			n = int(nr)
+		}
+		return
+	}
+
+	if nr > math.MaxInt {
+		n = math.MaxInt
+	} else {
+		n = int(nr)
+	}
+
+	return
+}
+
+func (c *CtxIO) ReadWithContext(ctx context.Context, p []byte) (n int64, err error) {
+
+	var nr int
+	var off int
+	var buf []byte
+
+	if p == nil || len(p) == 0 {
+		return
+	}
+	if c.buf.Len() == 0 {
+		err = io.EOF
+		return
+	}
+
+	if c.l.chunkLen > uint(len(p)) {
+		// Would normally be a single chunk, so one-shot it.
+		nr, err = c.buf.Read(p)
+		n = int64(nr)
+		return
+	}
+
+	// Chunk over it.
+endRead:
+	for {
+		select {
+		case <-ctx.Done():
+			err = ctx.Err()
+			return
+		default:
+			/*
+				off(set) is the index of the *next position* to write to.
+				Therefore the final offset == len(p), which means:
+
+					* if off == len(p), we're done (return no error, do *not* read from buf)
+					* if off + c.l.chunkLen > len(p), buf should be len(p) - off instead
+			*/
+			if off == len(p) {
+				break endRead
+			}
+			if uint(off)+c.l.chunkLen > uint(len(p)) {
+				buf = make([]byte, len(p)-off)
+			} else {
+				buf = make([]byte, c.l.chunkLen)
+			}
+			nr, err = c.buf.Read(buf)
+			n += int64(nr)
+			if nr > 0 {
+				// Copy out what was just read at the *current* offset, then advance it.
+				copy(p[off:], buf[:nr])
+				off += nr
+			}
+			if err == io.EOF {
+				break endRead
+			} else if err != nil {
+				return
+			}
+		}
+	}
+
+	return
+}
+
+func (c *CtxIO) SetChunkLen(size uint) (err error) {
+	return c.l.SetChunkLen(size)
+}
+
+func (c *CtxIO) SetContext(ctx context.Context) (err error) {
+
+	if ctx == nil {
+		err = ErrNilCtx
+		return
+	}
+
+	c.ctx = ctx
+
+	return
+}
+
+func (c *CtxIO) Write(p []byte) (n int, err error) {
+
+	var nw int64
+
+	if c.l.chunkLen > math.MaxInt64 {
+		err = ErrChunkTooBig
+		return
+	}
+	if nw, err = c.WriteNWithContext(c.ctx, p, int64(c.l.chunkLen)); err != nil {
+		if nw > math.MaxInt {
+			n = math.MaxInt
+		} else {
+			n = int(nw)
+		}
+		return
+	}
+
+	if nw > math.MaxInt {
+		n = math.MaxInt
+	} else {
+		n = int(nw)
+	}
+
+	return
+}
+
+func (c *CtxIO) WriteNWithContext(ctx context.Context, p []byte, n int64) (written int64, err error) {
+	return CopyCtxBufN(ctx, &c.buf, bytes.NewReader(p), n)
+}
+
+func (c *CtxIO) WriteRune(r rune) (n int, err error) {
+
+	// We don't even bother listening for the ctx.Done because it's a single rune.
+	n, err = c.buf.WriteRune(r)
+
+	return
+}
+
+func (c *CtxIO) WriteWithContext(ctx context.Context, p []byte) (n int64, err error) {
+	if c.l.chunkLen > math.MaxInt64 {
+		err = ErrChunkTooBig
+		return
+	}
+	return CopyCtxBufN(ctx, &c.buf, bytes.NewReader(p), int64(c.l.chunkLen))
+}
diff --git a/iox/funcs_xio.go b/iox/funcs_xio.go
new file mode 100644
index 0000000..3da8007
--- /dev/null
+++ b/iox/funcs_xio.go
@@ -0,0 +1,40 @@
+package iox
+
+import (
+	`io`
+)
+
+// Copy copies [io.Reader] `src` to [io.Writer] `dst`. It implements [Copier].
+func (x *XIO) Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+	return io.Copy(dst, src)
+}
+
+// CopyBuffer copies [io.Reader] `src` to [io.Writer] `dst` using buffer `buf`. It implements [CopyBufferer].
+func (x *XIO) CopyBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) {
+	return io.CopyBuffer(dst, src, buf)
+}
+
+// CopyBufWith copies [io.Reader] `src` to [io.Writer] `dst` using the buffer-returning function `bufFunc`. It implements [SizedCopyBufferInvoker].
+func (x *XIO) CopyBufWith(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {
+	return CopyBufWith(dst, src, bufFunc)
+}
+
+// CopyBufWithDynamic copies [io.Reader] `src` to [io.Writer] `dst` using the buffer-returning function `bufFunc` for each chunk. It implements [DynamicSizedCopyBufferInvoker].
+func (x *XIO) CopyBufWithDynamic(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) {
+	return CopyBufWithDynamic(dst, src, bufFunc)
+}
+
+/*
+CopyBufN reads buffered bytes from [io.Reader] `src` and copies to [io.Writer] `dst`
+using a fixed buffer size of `n` bytes.
+
+It implements [SizedCopyBufferer].
+*/
+func (x *XIO) CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
+	return CopyBufN(dst, src, n)
+}
+
+// CopyN copies from [io.Reader] `src` to [io.Writer] `dst`, `n` bytes at a time. It implements [SizedCopier].
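+// For example, a nil *XIO can be passed anywhere a [SizedCopier] is wanted
+// (`consume`, `dst`, and `src` below are hypothetical, not part of this package):
+//
+//	var sc iox.SizedCopier = (*iox.XIO)(nil)
+//	consume(sc) // e.g. func consume(c iox.SizedCopier) { _, _ = c.CopyN(dst, src, 1024) }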
+func (x *XIO) CopyN(dst io.Writer, src io.Reader, n int64) (written int64, err error) { + return io.CopyN(dst, src, n) +} diff --git a/iox/types.go b/iox/types.go index 7f92ca1..17ddd90 100644 --- a/iox/types.go +++ b/iox/types.go @@ -1,8 +1,209 @@ package iox +import ( + `bytes` + `context` + `io` + `sync` +) + type ( - // RuneWriter matches the behavior of *(bytes.Buffer).WriteRune and *(bufio.Writer).WriteRune + /* + RuneWriter matches the behavior of [bytes.Buffer.WriteRune] and [bufio.Writer.WriteRune]. + + (Note that this package does not have a "RuneReader"; see [io.RuneReader] instead.) + */ RuneWriter interface { WriteRune(r rune) (n int, err error) } + + // Copier matches the signature/behavior of [io.Copy]. Implemented by [XIO]. + Copier interface { + Copy(dst io.Writer, src io.Reader) (written int64, err error) + } + + // CopyBufferer matches the signature/behavior of [io.CopyBuffer]. Implemented by [XIO]. + CopyBufferer interface { + CopyBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) + } + + // SizedCopier matches the signature/behavior of [io.CopyN]. Implemented by [XIO]. + SizedCopier interface { + CopyN(dst io.Writer, src io.Reader, n int64) (written int64, err error) + } + + // SizedCopyBufferer matches the signature/behavior of [CopyBufN]. Implemented by [XIO]. + SizedCopyBufferer interface { + CopyBufN(dst io.Writer, src io.Reader, n int64) (written int64, err error) + } + + // SizedCopyBufferInvoker matches the signature/behavior of [CopyBufWith]. Implemented by [XIO]. + SizedCopyBufferInvoker interface { + CopyBufWith(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) + } + + // DynamicSizedCopyBufferInvoker matches the signature/behavior of [CopyBufWithDynamic]. Implemented by [XIO]. + DynamicSizedCopyBufferInvoker interface { + CopyBufWithDynamic(dst io.Writer, src io.Reader, bufFunc func() (b []byte)) (written int64, err error) + } + + /* + Chunker is used by both [ContextReader] and [ContextWriter] to set/get the current chunk size. + Chunking is inherently required to be specified in order to interrupt reads/writes/copies with a [context.Context]. + + Implementations *must* use a [sync.RWMutex] to get (RLock) and set (Lock) the chunk size. + The chunk size *must not* be directly accessible to maintain concurrency safety assumptions. + */ + Chunker interface { + // GetChunkLen returns the current chunk size/length in bytes. + GetChunkLen() (size uint) + // SetChunkLen sets the current chunk size/length in bytes. + SetChunkLen(size uint) (err error) + } + + /* + ChunkReader implements a chunking reader. + Third-party implementations *must* respect the chunk size locking (see [Chunker]). + + The Read method should read in chunks of the internal chunk size. + */ + ChunkReader interface { + io.Reader + Chunker + } + + /* + ChunkWriter implements a chunking writer. + Third-party implementations *must* respect the chunk size locking (see [Chunker]). + + The Write method should write out in chunks of the internal chunk size. + */ + ChunkWriter interface { + io.Writer + Chunker + } + + // ChunkReadWriter implements a chunking reader/writer. + ChunkReadWriter interface { + ChunkReader + ChunkWriter + } + + /* + ContextSetter allows one to set an internal context. + + A nil context should return an error. + */ + ContextSetter interface { + SetContext(context context.Context) (err error) + } + + /* + ContextCopier is defined to allow for consumer-provided types. See [CtxIO] for a package-provided type. 
+ + The Copy method should use an internal context and chunk size + (and thus wrap [CopyCtxBufN] internally on an external call to Copy, etc.). + */ + ContextCopier interface { + Copier + Chunker + ContextSetter + SizedCopyBufferer + } + + /* + ContextReader is primarily here to allow for consumer-provided types. See [CtxIO] for a package-provided type. + + The Read method should use an internal context and chunk size. + + The ReadWithContext method should use an internal chunk size. + */ + ContextReader interface { + ChunkReader + ContextSetter + ReadWithContext(ctx context.Context, p []byte) (n int64, err error) + } + + /* + ContextWriter is primarily here to allow for consumer-provided types. See [CtxIO] for a package-provided type. + + The Write method should use an internal context. + + The WriteWithContext should use an internal chunk size. + */ + ContextWriter interface { + ChunkWriter + ContextSetter + WriteWithContext(ctx context.Context, p []byte) (n int64, err error) + WriteNWithContext(ctx context.Context, p []byte, n int64) (written int64, err error) + } + + /* + ContextReadWriter is primarily here to allow for consumer-provided types. + + See [CtxIO] for a package-provided type. + */ + ContextReadWriter interface { + ContextReader + ContextWriter + } +) + +type ( + // ChunkLocker implements [Chunker]. + ChunkLocker struct { + lock sync.RWMutex + chunkLen uint + } + + /* + CtxIO is a type used to demonstrate "stateful" I/O introduced by this package. + It implements: + + * [Copier] + * [Chunker] + * [RuneWriter] + * [ChunkReader] + * [ChunkWriter] + * [ContextCopier] + * [ContextSetter] + * [ContextReader] + * [ContextWriter] + * [ChunkReadWriter] + * [ContextReadWriter] + * [SizedCopyBufferer] + + Unlike [XIO], it must be non-nil (see [NewCtxIO]) since it maintains state + (though technically, one does not need to call [NewCtxIO] if they call + [CtxIO.SetChunkLen] and [CtxIO.SetContext] before any other methods). + + [CtxIO.Read] and other Read methods writes to an internal buffer, + and [CtxIO.Write] and other Write methods writes out from it. + */ + CtxIO struct { + r io.Reader + w io.Writer + l ChunkLocker + buf bytes.Buffer + ctx context.Context + } + + /* + XIO is a type used to demonstrate "stateless" I/O introduced by this package. + It implements: + + * [Copier] + * [CopyBufferer] + * [SizedCopier] + * [SizedCopyBufferer] + * [SizedCopyBufferInvoker] + * [DynamicSizedCopyBufferInvoker] + + Unlike [CtxIO], the zero-value is ready to use since it holds no state + or configuration whatsoever. + + A nil XIO is perfectly usable but if you want something more idiomatic, + see [NewXIO]. + */ + XIO struct{} ) diff --git a/mapsx/doc.go b/mapsx/doc.go new file mode 100644 index 0000000..af12a8c --- /dev/null +++ b/mapsx/doc.go @@ -0,0 +1,4 @@ +/* +Package mapsx includes functions that probably should have been in [maps] but aren't. +*/ +package mapsx diff --git a/mapsx/errs.go b/mapsx/errs.go new file mode 100644 index 0000000..3197d63 --- /dev/null +++ b/mapsx/errs.go @@ -0,0 +1,9 @@ +package mapsx + +import ( + `errors` +) + +var ( + ErrNotFound = errors.New("key not found") +) diff --git a/mapsx/funcs.go b/mapsx/funcs.go new file mode 100644 index 0000000..9d0d1df --- /dev/null +++ b/mapsx/funcs.go @@ -0,0 +1,43 @@ +package mapsx + +/* +Get mimics Python's [dict.get()] behavior, returning value `v` if key `k` +is not found in map `m`. + +See also [GetOk], [Must]. 
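+
+A brief illustration (the map below is hypothetical):
+
+	ports := map[string]int{"http": 80}
+	_ = mapsx.Get(ports, "https", 443) // 443; key absent, fallback returned
+	_ = mapsx.Get(ports, "http", 443)  // 80; key present, stored value returned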
+ +[dict.get()]: https://docs.python.org/3/library/stdtypes.html#dict.get +*/ +func Get[Map ~map[K]V, K comparable, V any](m Map, k K, v V) (val V) { + + val, _ = GetOk(m, k, v) + + return +} + +// GetOk is like [Get] but also explicitly indicates whether `k` was found or not. See also [Must]. +func GetOk[Map ~map[K]V, K comparable, V any](m Map, k K, v V) (val V, found bool) { + + if val, found = m[k]; !found { + val = v + } + + return +} + +/* + Must, unlike [Get] or [GetOk], requires that `k` be in map `m`. + + A panic with error [ErrNotFound] will be raised if `k` is not present. + Otherwise the found value will be returned. +*/ +func Must[Map ~map[K]V, K comparable, V any](m Map, k K) (val V) { + + var ok bool + + if val, ok = m[k]; !ok { + panic(ErrNotFound) + } + + return +} diff --git a/netx/inetcksum/consts.go b/netx/inetcksum/consts.go index 0a9af60..84d6f86 100644 --- a/netx/inetcksum/consts.go +++ b/netx/inetcksum/consts.go @@ -10,11 +10,20 @@ const ( ) const ( - // cksumMask is AND'd with a checksum to get the "carried ones". + /* + cksumMask is AND'd with a checksum to get the "carried ones" + (the lower 16 bits before folding carries). + */ cksumMask uint32 = 0x0000ffff - // cksumShift is used in the "carried-ones folding". + /* + cksumShift is used in the "carried-ones folding"; + it's the number of bits to right-shift the carry-over. + */ cksumShift uint32 = 0x00000010 - // padShift is used to "pad out" a checksum for odd-length buffers by left-shifting. + /* + padShift is used to "pad out" a checksum for odd-length buffers by left-shifting. + It positions the high-byte of a 16-byte "word" (big-endian, as per ord below). + */ padShift uint32 = 0x00000008 ) diff --git a/netx/inetcksum/docs.go b/netx/inetcksum/docs.go index 7b8a738..5367b65 100644 --- a/netx/inetcksum/docs.go +++ b/netx/inetcksum/docs.go @@ -25,6 +25,9 @@ safety and no data retention, which can be used as a: * [io.StringWriter] * [io.Writer] +If you don't need all these interfaces, a reasonable alternative may be +to use gVisor's [gvisor.dev/gvisor/pkg/tcpip/checksum] instead. + [RFC 1071]: https://datatracker.ietf.org/doc/html/rfc1071 [RFC 1141]: https://datatracker.ietf.org/doc/html/rfc1141 [RFC 1624]: https://datatracker.ietf.org/doc/html/rfc1624 diff --git a/netx/inetcksum/funcs.go b/netx/inetcksum/funcs.go index 659eb48..915648d 100644 --- a/netx/inetcksum/funcs.go +++ b/netx/inetcksum/funcs.go @@ -7,8 +7,9 @@ import ( // New returns a new initialized [InetChecksum]. It will never panic. func New() (i *InetChecksum) { - i = &InetChecksum{} - _ = i.Aligned() + i = &InetChecksum{ + aligned: true, + } return } @@ -21,15 +22,14 @@ b may be nil or 0-length; this will not cause an error. func NewFromBytes(b []byte) (i *InetChecksum, copied int, err error) { var cksum InetChecksum + var cptr *InetChecksum = &cksum + + cksum.aligned = true if b != nil && len(b) > 0 { - if copied, err = cksum.Write(b); err != nil { + if copied, err = cptr.Write(b); err != nil { return } - _ = i.Aligned() - } else { - i = New() - return } i = &cksum @@ -48,7 +48,64 @@ func NewFromBuf(buf io.Reader) (i *InetChecksum, copied int64, err error) { var cksum InetChecksum - _ = i.Aligned() + cksum.aligned = true + + if buf != nil { + if copied, err = io.Copy(&cksum, buf); err != nil { + return + } + } + + i = &cksum + + return +} + +// NewSimple returns a new initialized [InetChecksumSimple]. It will never panic. 
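+//
+// A minimal sketch (this assumes the Simple variant mirrors [InetChecksum]'s Write/Sum16
+// methods, which are defined outside this hunk; `hdr` is a placeholder []byte):
+//
+//	cks := inetcksum.NewSimple()
+//	_, _ = cks.Write(hdr) // header bytes with the checksum field zeroed
+//	sum := cks.Sum16()    // the folded, complemented 16-bit checksum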
+func NewSimple() (i *InetChecksumSimple) { + + i = &InetChecksumSimple{ + aligned: true, + } + + return +} + +/* + NewSimpleFromBytes returns a new [InetChecksumSimple] initialized with explicit bytes. + + b may be nil or 0-length; this will not cause an error. +*/ +func NewSimpleFromBytes(b []byte) (i *InetChecksumSimple, copied int, err error) { + + var cksum InetChecksumSimple + var cptr *InetChecksumSimple = &cksum + + cksum.aligned = true + + if b != nil && len(b) > 0 { + if copied, err = cptr.Write(b); err != nil { + return + } + } + + i = &cksum + + return +} + +/* + NewSimpleFromBuf returns an [InetChecksumSimple] from a specified [io.Reader]. + + buf may be nil. If it isn't, NewSimpleFromBuf will call [io.Copy] on buf. + Note that this may exhaust your passed buf or advance its current seek position/offset, + depending on its type. +*/ +func NewSimpleFromBuf(buf io.Reader) (i *InetChecksumSimple, copied int64, err error) { + + var cksum InetChecksumSimple + + cksum.aligned = true if buf != nil { if copied, err = io.Copy(&cksum, buf); err != nil { diff --git a/netx/inetcksum/funcs_inetchecksum.go b/netx/inetcksum/funcs_inetchecksum.go index 4ab3ab2..3ec5d51 100644 --- a/netx/inetcksum/funcs_inetchecksum.go +++ b/netx/inetcksum/funcs_inetchecksum.go @@ -22,7 +22,7 @@ func (i *InetChecksum) Aligned() (aligned bool) { defer i.alignLock.Unlock() i.bufLock.RLock() - aligned = i.buf.Len()&2 == 0 + aligned = i.buf.Len()%2 == 0 i.bufLock.RUnlock() i.aligned = aligned @@ -113,7 +113,7 @@ func (i *InetChecksum) Reset() { i.sumLock.Lock() i.lastLock.Lock() - i.aligned = false + i.aligned = true i.alignLock.Unlock() i.buf.Reset() @@ -308,7 +308,7 @@ func (i *InetChecksum) WriteByte(c byte) (err error) { } if !i.disabledBuf { - if err = i.WriteByte(c); err != nil { + if err = i.buf.WriteByte(c); err != nil { i.sum = origSum i.aligned = origAligned i.last = origLast diff --git a/netx/inetcksum/funcs_inetchecksumsimple.go b/netx/inetcksum/funcs_inetchecksumsimple.go index 1fdf712..32849e1 100644 --- a/netx/inetcksum/funcs_inetchecksumsimple.go +++ b/netx/inetcksum/funcs_inetchecksumsimple.go @@ -27,7 +27,7 @@ func (i *InetChecksumSimple) Reset() { i.last = 0x00 i.sum = 0 - i.last = 0x00 + i.aligned = true } diff --git a/netx/inetcksum/types.go b/netx/inetcksum/types.go index 637dff8..2bd8434 100644 --- a/netx/inetcksum/types.go +++ b/netx/inetcksum/types.go @@ -17,8 +17,8 @@ type ( If [InetChecksum.Aligned] returns false, the checksum result of an [InetChecksum.Sum] or [InetChecksum.Sum16] (or any other operation returning a sum) will INCLUDE THE PAD NULL BYTE (which is only - applied *at the time of the Sum/Sum32 call) and is NOT applied to - the persistent underlying storage. + applied *at the time of the Sum/Sum32 call* and is NOT applied to + the persistent underlying storage). 
InetChecksum differs from [InetChecksumSimple] in that it: diff --git a/stringsx/funcs_test.go b/stringsx/funcs_test.go index c3da753..9fd89b1 100644 --- a/stringsx/funcs_test.go +++ b/stringsx/funcs_test.go @@ -37,113 +37,6 @@ type ( } ) -func TestIndent(t *testing.T) { - - var out string - var tests []testIndentSet = []testIndentSet{ - testIndentSet{ - name: "standard, no trailing newline", - orig: "foo\nbar\nbaz", - indent: "", - lvl: 1, - ws: false, - empty: false, - tgt: "\tfoo\n\tbar\n\tbaz", - }, - testIndentSet{ - name: "standard, trailing newline", - orig: "foo\nbar\nbaz\n", - indent: "", - lvl: 1, - ws: false, - empty: false, - tgt: "\tfoo\n\tbar\n\tbaz\n", - }, - testIndentSet{ - name: "standard, trailing newline with empty", - orig: "foo\nbar\nbaz\n", - indent: "", - lvl: 1, - ws: false, - empty: true, - tgt: "\tfoo\n\tbar\n\tbaz\n\t", - }, - testIndentSet{ - name: "standard, trailing newline with ws", - orig: "foo\nbar\nbaz\n", - indent: "", - lvl: 1, - ws: true, - empty: false, - tgt: "\tfoo\n\tbar\n\tbaz\n", - }, - testIndentSet{ - name: "standard, trailing newline with ws and empty", - orig: "foo\nbar\nbaz\n", - indent: "", - lvl: 1, - ws: true, - empty: true, - tgt: "\tfoo\n\tbar\n\tbaz\n\t", - }, - testIndentSet{ - name: "standard, trailing ws newline with empty", - orig: "foo\nbar\nbaz\n ", - indent: "", - lvl: 1, - ws: false, - empty: true, - tgt: "\tfoo\n\tbar\n\tbaz\n ", - }, - testIndentSet{ - name: "standard, trailing ws newline with ws", - orig: "foo\nbar\nbaz\n ", - indent: "", - lvl: 1, - ws: true, - empty: false, - tgt: "\tfoo\n\tbar\n\tbaz\n\t ", - }, - testIndentSet{ - name: "standard, trailing ws newline with ws and empty", - orig: "foo\nbar\nbaz\n \n", - indent: "", - lvl: 1, - ws: true, - empty: true, - tgt: "\tfoo\n\tbar\n\tbaz\n\t \n\t", - }, - testIndentSet{ - name: "comment", - orig: "foo\nbar\nbaz", - indent: "# ", - lvl: 1, - ws: false, - empty: false, - tgt: "# foo\n# bar\n# baz", - }, - } - - for idx, ts := range tests { - out = Indent(ts.orig, ts.indent, ts.lvl, ts.ws, ts.empty) - if out == ts.tgt { - t.Logf("[%d] OK (%s): %#v: got %#v", idx, ts.name, ts.orig, out) - } else { - t.Errorf( - "[%d] FAIL (%s): %#v (len %d):\n"+ - "\t\t\texpected (len %d): %#v\n"+ - "\t\t\tgot (len %d): %#v\n"+ - "\t\t%#v", - idx, ts.name, ts.orig, len(ts.orig), - len(ts.tgt), ts.tgt, - len(out), out, - ts, - ) - } - } - -} - func TestRedact(t *testing.T) { var out string