v0.2.1
FIXED:
* host splitter wasn't working quite correctly; this has been fixed.
@@ -193,25 +193,26 @@ func CheckReserved(nets []*netip.Prefix, revRecursive, recursive, excludePrivate
				reservations = make(map[netip.Prefix]*IANAAddrNetResRecord)
			}
			reservations[*n] = res
			if !revRecursive && !recursive {
				continue
			}
			for p, r := range reserved {
				// This... *should* be safe? I don't think any reservations overlap.
				// Anyways, revRecursive works because n.Addr() returns the network address, which should be the canonical boundary.
				// recursive works for the same reason, just the other end.
				// Math!
				if revRecursive && p.Contains(n.Addr()) {
					if reservations == nil {
						reservations = make(map[netip.Prefix]*IANAAddrNetResRecord)
					}
					reservations[p] = r
				} else if recursive && n.Contains(p.Addr()) {
					if reservations == nil {
						reservations = make(map[netip.Prefix]*IANAAddrNetResRecord)
					}
					reservations[p] = r
				}
			if !revRecursive && !recursive {
				continue
			}
			for p, r := range reserved {
				// This... *should* be safe? I don't think any reservations overlap.
				// Anyways, revRecursive works because n.Addr() returns the network address, which should be the canonical boundary.
				// recursive works for the same reason, just the other end.
				// Math!
				if revRecursive && p.Contains(n.Addr()) {
					if reservations == nil {
						reservations = make(map[netip.Prefix]*IANAAddrNetResRecord)
					}
					reservations[p] = r
				}
				if recursive && n.Bits() < p.Bits() && n.Contains(p.Addr()) {
					if reservations == nil {
						reservations = make(map[netip.Prefix]*IANAAddrNetResRecord)
					}
					reservations[p] = r
				}
			}
		}
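The overlap checks above lean on net/netip semantics: Prefix.Addr() of a parsed prefix is its base (network) address, so calling Prefix.Contains on that address is enough to detect a parent/child relationship, and the Bits() comparison added in this commit keeps an equal-or-wider prefix from matching the "recursive" (child) case. A minimal standalone sketch of those two checks (illustrative only, not part of the commit):

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	n := netip.MustParsePrefix("10.10.0.0/16") // candidate network
	p := netip.MustParsePrefix("10.0.0.0/8")   // a hypothetical reservation

	// "revRecursive" case: the reservation is a parent of (or equal to) n.
	fmt.Println(p.Contains(n.Addr())) // true: 10.10.0.0 falls inside 10.0.0.0/8

	// "recursive" case: the reservation is a child of n; the n.Bits() < p.Bits()
	// guard keeps equal-or-wider prefixes out of this branch.
	child := netip.MustParsePrefix("10.10.3.0/24")
	fmt.Println(n.Bits() < child.Bits() && n.Contains(child.Addr())) // true
}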
@@ -223,7 +224,7 @@ func CheckReserved(nets []*netip.Prefix, revRecursive, recursive, excludePrivate
func Contain(origPfx *netip.Prefix, nets []*netip.Prefix, remaining *netipx.IPSet, splitter NetSplitter) (s *StructuredResults, err error) {

	var rem []netip.Prefix
	var reserved map[netip.Prefix]*IANAAddrNetResRecord
	// var reserved map[netip.Prefix]*IANAAddrNetResRecord
	var sr = StructuredResults{
		Original: origPfx,
	}
@@ -276,19 +277,21 @@ func Contain(origPfx *netip.Prefix, nets []*netip.Prefix, remaining *netipx.IPSe
		}
	}

	if nets != nil {
		if reserved, err = CheckReserved(nets, true, true, false); err != nil {
			return
		}
		if reserved != nil && len(reserved) > 0 {
			s.Reservations = make([]*IANAAddrNetResRecord, len(reserved))
			idx := 0
			for _, r := range reserved {
				s.Reservations[idx] = r
				idx++
	/*
		if nets != nil {
			if reserved, err = CheckReserved(nets, true, true, false); err != nil {
				return
			}
			if reserved != nil && len(reserved) > 0 {
				s.Reservations = make([]*IANAAddrNetResRecord, len(reserved))
				idx := 0
				for _, r := range reserved {
					s.Reservations[idx] = r
					idx++
				}
			}
		}
	}
	*/

	s = &sr

@@ -377,27 +380,105 @@ func MaskInvert(mask net.IPMask) (inverted net.IPMask) {
	return
}

/*
NumAddrsIn returns the number of addresses in a given prefix length
and inet family.

If isIpv6 is false, it is assumed to be IPv4 (...duh).

inclNet and inclBcast have the same meanings as in NumAddrsNet and NumAddrsPfx.

Note that for the single-host prefix (/32 for IPv4, /128 for IPv6), numAddrs will *always* be 1.
For point-to-point prefix (IPv4 /31, IPv6 /127), numAddrs will *ALWAYS* be 2.
*/
func NumAddrsIn(prefixLen uint8, isIpv6, inclNet, inclBcast bool) (numAddrs *big.Int, err error) {

	var numBits uint
	var numRemoved int64
	var maxBitLen uint8 = maxBitsv4

	if isIpv6 {
		maxBitLen = maxBitsv6
	}
	if prefixLen > maxBitLen {
		err = ErrBadPrefixLen
		return
	}
	if prefixLen == maxBitLen {
		numAddrs = big.NewInt(1)
		return
	}
	if (prefixLen + 1) == maxBitLen {
		numAddrs = big.NewInt(2)
		return
	}

	numBits = uint(maxBitLen - prefixLen)

	numAddrs = new(big.Int).Lsh(big.NewInt(1), numBits)

	if !inclNet {
		numRemoved++
	}
	if !inclBcast {
		numRemoved++
	}
	if numRemoved > 0 {
		_ = numAddrs.Sub(numAddrs, big.NewInt(numRemoved))
	}

	return
}
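In other words: a prefix of length prefixLen in a family with maxBitLen total bits spans 2^(maxBitLen-prefixLen) addresses, and excluding the network and/or broadcast address removes one each; the /32 and /31 (or /128 and /127) cases are special-cased above so the count never goes negative. A small self-contained sketch of that arithmetic, using a hypothetical helper name (addrsIn) rather than the package's own function, and omitting those special cases:

package main

import (
	"fmt"
	"math/big"
)

// addrsIn mirrors the counting logic above for illustration only:
// 2^(maxBits-prefixLen), optionally minus the network and broadcast addresses.
// (It does not reproduce the /32 and /31 special cases.)
func addrsIn(prefixLen, maxBits uint8, inclNet, inclBcast bool) *big.Int {
	n := new(big.Int).Lsh(big.NewInt(1), uint(maxBits-prefixLen))
	if !inclNet {
		n.Sub(n, big.NewInt(1))
	}
	if !inclBcast {
		n.Sub(n, big.NewInt(1))
	}
	return n
}

func main() {
	fmt.Println(addrsIn(24, 32, true, true))   // 256: every address in an IPv4 /24
	fmt.Println(addrsIn(24, 32, false, false)) // 254: usable hosts only
	fmt.Println(addrsIn(64, 128, true, true))  // 18446744073709551616: addresses in an IPv6 /64
}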

/*
NumAddrsNet returns the number of IP addresses in a net.IPNet.

# The network address is included in the count if inclNet is true, otherwise it is excluded
The network address is included in the count if inclNet is true, otherwise it is excluded.

Only assignable addresses ("hosts") are considered if hostsOnly is true,
otherwise all addresses are counted (depending on inclNet).
The broadcast (or reserved broadcast, in the case of IPv6) address will be included in
the count if inclBcast is true, otherwise it is excluded.

numAddrs will be nil if pfx is nil or invalid.
*/
func NumAddrsNet(pfx *net.IPNet, inclNet, hostsOnly bool) (numAddrs *big.Int) {
func NumAddrsNet(pfx *net.IPNet, inclNet, inclBcast bool) (numAddrs *big.Int) {

	var nPfx netip.Prefix
	var ok bool

	if pfx == nil {
		return
	}
	if nPfx, ok = netipx.FromStdIPNet(pfx); !ok {
		return
	}

	// TODO
	numAddrs = NumAddrsPfx(nPfx, inclNet, inclBcast)

	return
}

// NumAddrsPfx is the exact same as NumAddrsNet but for a net/netip.Prefix instead.
// TODO
func NumAddrsPfx(pfx netip.Prefix, inclNet, inclBcast bool) (numAddrs *big.Int) {

	var numBits uint
	var numRemoved int64

	numBits = uint(pfx.Addr().BitLen() - pfx.Bits())

	numAddrs = new(big.Int).Lsh(big.NewInt(1), numBits)

	if !inclNet {
		numRemoved++
	}
	if !inclBcast {
		numRemoved++
	}
	if numRemoved > 0 {
		_ = numAddrs.Sub(numAddrs, big.NewInt(numRemoved))
	}

	return
}
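Assuming the call sites live in the same netsplit package shown later in this commit (the module's import path is not part of the diff), usage of the two counters would look roughly like this sketch:

package netsplit

import (
	"fmt"
	"net"
	"net/netip"
)

// exampleCounts is an illustrative sketch, not part of this commit.
func exampleCounts() {
	pfx := netip.MustParsePrefix("192.168.1.0/24")

	fmt.Println(NumAddrsPfx(pfx, true, true))   // 256: every address in the /24
	fmt.Println(NumAddrsPfx(pfx, false, false)) // 254: network and broadcast excluded

	_, ipNet, _ := net.ParseCIDR("192.168.1.0/24")
	fmt.Println(NumAddrsNet(ipNet, false, false)) // 254 again, via the net.IPNet wrapper
}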

/*
NumNets returns the number of times a prefix of size subnet fits into a prefix of size network.
@@ -1,12 +1,9 @@
package netsplit

import (
	"fmt"
	"math/big"
	"net"
	"net/netip"

	"github.com/projectdiscovery/mapcidr"
	"go4.org/netipx"
)

@@ -16,54 +13,84 @@ This strategy attempts to split the network into subnets of equal number of host

remaining may or may not be nil depending on if the number of hosts can fit cleanly within equal network sizes on boundaries.

An ErrBadNumHosts will be returned if the number of hosts does not match the *addressable* range in a prefix.
An ErrBadNumHosts will be returned if the number of hosts does not match the *exact* number of addresses per spec in a prefix.
*/
func (h *HostSplitter) Split() (nets []*netip.Prefix, remaining *netipx.IPSet, err error) {

	var pfx netip.Prefix
	var tgt *big.Int
	var splitCidr int
	var hosts *big.Int
	var sub netip.Prefix
	var subPtr *netip.Prefix
	var split []*net.IPNet
	var ipsb *netipx.IPSetBuilder = new(netipx.IPSetBuilder)
	var found bool
	var cs *CIDRSplitter

	if h == nil || h.NumberHosts == 0 || h.BaseSplitter == nil || h.network == nil {
		return
	}

	if split, err = mapcidr.SplitIPNetByNumber(h.network, int(h.NumberHosts)); err != nil {
	pfx, _ = netipx.FromStdIPNet(h.network)
	tgt = new(big.Int)
	tgt.SetUint64(uint64(h.NumberHosts))

	if NumAddrsPfx(pfx, h.InclNetAddr, h.InclBcastAddr).Cmp(tgt) < 0 {
		// The number of hosts per-subnet exceeds the number of addresses in the specified network.
		err = ErrNoNetSpace
		return
	}
	/*
		Iterate up through prefix lengths for the inet family's maximum length, getting larger and larger,
		until we reach the first prefix that can contain tgt.
		If we reach h.network.Bits(), we are forced to use that.
		(Any case otherwise should be handled by the above checks.)
	*/
	for splitCidr = pfx.Addr().BitLen(); splitCidr >= pfx.Bits(); splitCidr-- {
		if hosts, err = NumAddrsIn(uint8(splitCidr), pfx.Addr().Is6(), h.InclNetAddr, h.InclBcastAddr); err != nil {
			return
		}
		if hosts.Cmp(tgt) >= 0 {
			found = true
			break
		}
	}
	if !found {
		// Pragmatically, we should never be able to get to this code.
		err = ErrNoNetSpace
		return
	}

	fmt.Println(split)

	tgt = big.NewInt(0)
	tgt.SetUint64(uint64(h.NumberHosts))

	nets = make([]*netip.Prefix, len(split))
	for idx, n := range split {
		sub, _ = netipx.FromStdIPNet(n)
		hosts = mapcidr.CountIPsInCIDR(false, false, n)
		if hosts == nil || tgt.Cmp(hosts) != 0 {
			err = &SplitErr{
				Wrapped: ErrBadNumHosts,
				Nets: nets,
				Remaining: remaining,
				LastSubnet: &sub,
				RequestedPrefixLen: uint8(sub.Bits()),
			}
			ipsb.AddPrefix(sub)
		} else {
			subPtr = new(netip.Prefix)
			*subPtr = sub
			nets = append(nets, subPtr)
	// Now that we have an appropriate prefix length for splitting, we can offload a huge portion of that to a CIDRSplitter.
	cs = &CIDRSplitter{
		PrefixLength: uint8(splitCidr),
		BaseSplitter: h.BaseSplitter,
	}
	if nets, remaining, err = cs.Split(); err != nil {
		return
	}
	// If strict mode is enabled, we then need to match the number of hosts exactly in the subnet.
	if !h.Strict {
		return
	}
	// First off, if remaining is not nil/empty, that immediately fails strict.
	if remaining != nil && remaining.Prefixes() != nil && len(remaining.Prefixes()) != 0 {
		err = &SplitErr{
			Wrapped: ErrBadNumHosts,
			Nets: nets,
			Remaining: remaining,
			LastSubnet: nil,
			RequestedPrefixLen: uint8(splitCidr),
		}

			nets[idx] = new(netip.Prefix)
			*nets[idx] = sub
		return
	}

		if remaining, err = ipsb.IPSet(); err != nil {
	// Then we check the cidr we split on, and check its number of hosts.
	if hosts.Cmp(tgt) != 0 {
		err = &SplitErr{
			Wrapped: ErrBadNumHosts,
			Nets: nets,
			Remaining: remaining,
			LastSubnet: nil,
			RequestedPrefixLen: uint8(splitCidr),
		}
		return
	}

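The replacement loop above implements the search described in the block comment: start at the family's host prefix (/32 or /128) and widen one bit at a time until the first prefix length whose address count covers the requested host count, then hand that length to a CIDRSplitter. A standalone sketch of just that search (a hypothetical helper, ignoring the InclNetAddr/InclBcastAddr adjustments that NumAddrsIn applies):

package main

import (
	"fmt"
	"math/big"
	"net/netip"
)

// smallestPrefixFor walks from the host prefix toward the network's own length
// and stops at the first prefix length whose raw address count covers the target.
func smallestPrefixFor(network netip.Prefix, hostsWanted uint64) (int, bool) {
	tgt := new(big.Int).SetUint64(hostsWanted)
	for bits := network.Addr().BitLen(); bits >= network.Bits(); bits-- {
		count := new(big.Int).Lsh(big.NewInt(1), uint(network.Addr().BitLen()-bits))
		if count.Cmp(tgt) >= 0 {
			return bits, true
		}
	}
	return 0, false
}

func main() {
	// A /23 is the smallest prefix holding 500 addresses (512 >= 500).
	fmt.Println(smallestPrefixFor(netip.MustParsePrefix("10.0.0.0/16"), 500)) // 23 true
}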
@@ -51,6 +51,10 @@ It attempts to evenly distribute addresses amoungs subnets.
type HostSplitter struct {
	// NumberHosts is the number of hosts to be placed in each subnet to split out.
	NumberHosts uint `json:"hosts" xml:"hosts,attr" yaml:"Number of Hosts Per Subnet"`
	// InclNetAddr, if true, specifies that NumberHosts includes the network address.
	InclNetAddr bool `json:"net_addr" xml:"netAddr,attr,omitempty" yaml:"Network Address Included,omitempty"`
	// InclBcastAddr, if true, specifies that NumberHosts includes the broadcast address.
	InclBcastAddr bool `json:"bcast_addr" xml:"bcast,attr,omitempty" yaml:"Broadcast Address Included,omitempty"`
	// Strict, if true, will return an error from Split if the network cannot split into subnets of NumberHosts-addressable networks exactly.
	Strict bool `json:"strict" xml:"strict,attr,omitempty" yaml:"Strictly Equal Hosts Per Subnet"`
	*BaseSplitter `json:"net" xml:"net,omitempty" yaml:"network,omitempty"`
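For reference, the new fields map onto a request like "split this network into subnets of 254 usable hosts each, and fail unless that works out exactly". A hedged sketch of such a value (the embedded BaseSplitter, which carries the network to split, is not defined in this diff and is left out):

package netsplit

// exampleHostSplitter is an illustrative sketch, not part of this commit.
// With BaseSplitter left nil, Split() simply returns early, so a real caller
// would still need to populate the embedded "net" field.
func exampleHostSplitter() *HostSplitter {
	return &HostSplitter{
		NumberHosts:   254,   // "hosts": addressable hosts wanted per subnet
		InclNetAddr:   false, // "net_addr": the network address does not count toward NumberHosts
		InclBcastAddr: false, // "bcast_addr": neither does the broadcast address
		Strict:        true,  // "strict": error out unless every subnet holds exactly 254 hosts
	}
}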
@@ -78,14 +82,14 @@ type VLSMSplitter struct {
	(ascending order) instead of larger networks/smaller prefixes (descending order).
	You almost assuredly do not want to do this.
	*/
	Ascending bool
	Ascending bool `json:"asc,omitempty" xml:"asc,attr,omitempty" yaml:"Ascending Order,omitempty"`
	/*
		Explicit, if true, will ignore Ascending completely and split in the explicit order of PrefixLengths.

		This has the potential to be *extremely* wasteful of addressing space as the resulting blocks are
		VERY unoptimized.
	*/
	Explicit bool
	Explicit bool `json:"explicit,omitempty" xml:"explicit,attr,omitempty" yaml:"Explicit Ordering,omitempty"`
	// PrefixLengths contains the prefix lengths of each subnet to split out from the network.
	PrefixLengths []uint8 `json:"prefixes" xml:"prefixes>prefix" yaml:"Prefix Lengths"`
	*BaseSplitter `json:"net" xml:"net,omitempty" yaml:"network,omitempty"`

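A similar sketch for VLSMSplitter: PrefixLengths lists the subnet sizes to carve out of the parent network, while Ascending and Explicit only affect the order in which they are allocated (again, the embedded BaseSplitter is omitted here):

package netsplit

// exampleVLSMSplitter is an illustrative sketch, not part of this commit:
// carve one /26, two /27s, and a /28 out of the parent network, allocating
// larger subnets (smaller prefix lengths) first, which is the default order.
func exampleVLSMSplitter() *VLSMSplitter {
	return &VLSMSplitter{
		Ascending:     false, // descending order: the /26 before the /27s and /28
		Explicit:      false, // let the splitter order PrefixLengths itself
		PrefixLengths: []uint8{26, 27, 27, 28},
	}
}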