Compare commits
41 Commits
master...4.x_rewrit

SHA1:
8278c55cab
9d018be8aa
75580b43cc
716120e14b
a376bea0e9
69b6ec60d0
0dd54a604d
c48c752f84
82c21f170a
f677839194
4ed9ef5f5f
5017d76064
0682137b21
7c0c7bf5c0
ee653e81f6
9f74e97c45
b134ee67bd
e818b04f16
4de9d1a26c
1d9b40a597
f4f131890d
b2498ba98d
559789ffe5
bf12fbcda3
1df5bd87e0
ed7ccdeeaf
befcd8185e
262eefba07
46a9df6ef6
d9ee277ff4
721c571da6
96bca202f0
303e006b35
b2622406f0
7819b5edc4
a315468ff8
f4c5c0fdf8
4dd03dea75
5182e8154b
c6a837d1fe
aaf03db8bd
.gitignore (vendored, 35 changes)

@@ -1,38 +1,18 @@
 # We don't want local build settings in case someone's using
 # the git dir as a place to store their build.ini
-/build.ini
-/build.ini.*
-/dist.build.ini
-*.bak
+confs/*
 
-# These are user-controlled.
-/overlay
-!/overlay/x86_64
-!/overlay/i686
-!/overlay/etc
-extra/templates/overlay
-!extra/templates/overlay/x86_64
-!extra/templates/overlay/i686
-!extra/templates/overlay/etc
-# The default doesn't store these in the git working dir,
-# but better safe than sorry.
-/root.x86_64
-/root.i686
-/http
-/iso
-/temp
-/tftpboot
-
-# We don't need these in git. They should be generated dynamically.
+# We don't need these in git. They should be generated dynamically, or they're used in testing/development or local
+# to a workstation.
 .latest.*.tar
 /buildnum
-/screenlog*
+screenlog*
 /logs
 *.swp
 *.lck
 *~
 .~lock.*
-/extrasrc
+.idea/
 
 # You should really generate local copies of these, as they're pretty private.
 **/etc/dropbear

@@ -47,4 +27,7 @@ extra/templates/overlay
 # and we DEFINITELY don't need these.
 __pycache__/
 *.pyc
-bdisk/test.py
+*test*.py
+*test*.sh
+*test*.exp
+*.bak
@@ -671,4 +671,4 @@ into proprietary programs. If your program is a subroutine library, you
 may consider it more useful to permit linking proprietary applications with
 the library. If this is what you want to do, use the GNU Lesser General
 Public License instead of this License. But first, please read
 <http://www.gnu.org/philosophy/why-not-lgpl.html>.
TODO (Normal file, 43 lines)

@@ -0,0 +1,43 @@
- write classes/functions
- XML-based config
-x XML syntax
--- x regex btags - case-insensitive? this can be represented in-pattern:
    x https://stackoverflow.com/a/9655186/733214
--- remove sources stuff - that should be in the guest definitions.
-x configuration generator
--- x print end result xml config to stderr for easier redirection? or print prompts to stderr and xml to stdout?
-- x XSD for validation
-- Flask app for generating config?
-- TKinter (or pygame?) GUI?
--- https://docs.python.org/3/faq/gui.html
--- https://www.pygame.org/wiki/gui
- ensure we use docstrings in a Sphinx-compatible manner?
  https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html
  at the very least document all the functions and such so pydoc's happy.

- locking

- for docs, 3.x (as of 3.10) was 2.4M.

- x Need ability to write/parse mtree specs (or a similar equivalent) for applying ownerships/permissions to overlay files
-- parsing is done. writing may? come later.
--- i think writing is mostly done/straightforward; still need to work on parsing mode octals for files


- package for PyPI:
  # https://packaging.python.org/tutorials/distributing-packages/
  # https://docs.python.org/3/distutils/apiref.html
  # https://python-packaging.readthedocs.io/en/latest/minimal.html
  # https://setuptools.readthedocs.io/en/latest/setuptools.html#new-and-changed-setup-keywords


BUGS.SQUARE-R00T.NET bugs/tasks:
#7: Ensure conditional deps/imports for features only if used.
    Is this setup.py-compatible?
    nooope. just make everything a dep.
#14: Use os.path.join() for more consistency/pythonicness
#24: Run as regular user? (pychroot? fakeroot?)
#34: Build-time support for only building single phase of build
#39: Fix UEFI
#40: ISO overlay (to add e.g. memtest86+ to final ISO)
#43: Support resuming partial tarball downloads (Accept-Ranges: bytes)
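The "regex btags - case-insensitive? this can be represented in-pattern" item above refers to inline regex flags (the linked Stack Overflow answer); a minimal sketch of the idea, with a hypothetical btag name that is not defined anywhere in the repo:

import re

# Python's re supports an inline (?i) flag, so a btag pattern can opt into
# case-insensitive matching without passing re.IGNORECASE separately.
# 'buildnum' is an illustrative btag name, not one BDisk actually defines.
print(bool(re.search(r'(?i)\{variable%buildnum\}', '{VARIABLE%BUILDNUM}')))  # True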
bdisk/BIOS.py (Normal file, 4 lines)

@@ -0,0 +1,4 @@
import jinja2
import os
import shutil

bdisk/GIT.py (Normal file, 2 lines)

@@ -0,0 +1,2 @@
import git
import os
bdisk/GPG.py (Normal file, 493 lines)

@@ -0,0 +1,493 @@
import copy
import datetime
import gpg
import operator
import os
import re
import utils  # LOCAL
from functools import reduce
from gpg import gpgme

# Reference material.
# http://files.au.adversary.org/crypto/GPGMEpythonHOWTOen.html
# https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gpgme.git;a=tree;f=lang/python/examples;hb=HEAD
# https://www.gnupg.org/documentation/manuals/gpgme.pdf
# Support ECC? https://www.gnupg.org/faq/whats-new-in-2.1.html#ecc
# section 4.1, 4.2, 7.5.1, 7.5.5 in gpgme manual

# These are static values. We include them in the parent so we don't define them every time a function is called.
# Key signature attributes.
_keysig_attrs = ('comment', 'email', 'expired', 'expires', 'exportable', 'invalid', 'keyid', 'name', 'notations',
                 'pubkey_algo', 'revoked', 'sig_class', 'status', 'timestamp', 'uid')
# Data signature attributes.
_sig_attrs = ('chain_model', 'exp_timestamp', 'fpr', 'hash_algo', 'is_de_vs', 'key', 'notations', 'pka_address',
              'pka_trust', 'pubkey_algo', 'status', 'summary', 'timestamp', 'validity', 'validity_reason',
              'wrong_key_usage')

# A regex that ignores signature verification validity errors we don't care about.
_valid_ignore = re.compile(('^('
                            #'CHECKSUM|'
                            'ELEMENT_NOT_FOUND|'
                            'MISSING_VALUE|'
                            #'UNKNOWN_PACKET|'
                            'UNSUPPORTED_CMS_OBJ|'
                            'WRONG_SECKEY|'
                            '('
                            'DECRYPT|'
                            'INV|'
                            'NO|'
                            'PIN|'
                            'SOURCE'
                            ')_'
                            ')'))

# A function to build a list based on the above.
def _gen_valid_validities():
    # Strips out and minimizes the error output.
    v = {}
    for s in dir(gpg.constants.validity):
        if _valid_ignore.search(s):
            continue
        val = getattr(gpg.constants.validity, s)
        if not isinstance(val, int):
            continue
        v[s] = val
    return(v)
_valid_validities = _gen_valid_validities()

def _get_sigstatus(status):
    statuses = []
    for e in _valid_validities:
        if ((status & _valid_validities[e]) == _valid_validities[e]):
            statuses.append(e)
    return(statuses)

def _get_sig_isgood(sigstat):
    is_good = True
    if not ((sigstat & gpg.constants.sigsum.GREEN) == gpg.constants.sigsum.GREEN):
        is_good = False
    if not ((sigstat & gpg.constants.sigsum.VALID) == gpg.constants.sigsum.VALID):
        is_good = False
    return(is_good)


# This helps translate the input name from the conf to a string compatible with the gpg module.
_algmaps = {#'cv': 'cv{keysize}',  # DISABLED, can't sign (only encrypt). Currently only 25519
            'ed': 'ed{keysize}',  # Currently only 25519
            #'elg': 'elg{}',  # DISABLED, can't sign (only encrypt). 1024, 2048, 4096
            'nist': 'nistp{keysize}',  # 256, 384, 521
            'brainpool.1': 'brainpoolP{keysize}r1',  # 256, 384, 512
            'sec.k1': 'secp{keysize}k1',  # Currently only 256
            'rsa': 'rsa{keysize}',  # Variable (1024 <> 4096), but we only support 1024, 2048, 4096
            'dsa': 'dsa{keysize}'}  # Variable (768 <> 3072), but we only support 768, 2048, 3072

# This is just a helper function to get a delta from a unix epoch.
def _epoch_helper(epoch):
    d = datetime.datetime.utcfromtimestamp(epoch) - datetime.datetime.utcnow()
    return(abs(int(d.total_seconds())))  # Returns a positive integer even if negative...
    #return(int(d.total_seconds()))


# _KeyEditor and _getEditPrompt are used to interactively edit keys -- notably currently used for editing trusts
# (since there's no way to edit trust otherwise).
# https://www.gnupg.org/documentation/manuals/gpgme/Advanced-Key-Editing.html
# https://www.apt-browse.org/browse/debian/wheezy/main/amd64/python-pyme/1:0.8.1-2/file/usr/share/doc/python-pyme/examples/t-edit.py
# https://searchcode.com/codesearch/view/20535820/
# https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS
# You can get the prompt identifiers and status indicators without grokking the source
# by first interactively performing the type of edit(s) you want to do with this command:
# gpg --expert --status-fd 2 --command-fd 2 --edit-key <KEY_ID>
# Per:
# https://lists.gnupg.org/pipermail/gnupg-users/2002-April/012630.html
# https://lists.gt.net/gnupg/users/9544
# https://raymii.org/s/articles/GPG_noninteractive_batch_sign_trust_and_send_gnupg_keys.html
class _KeyEditor(object):
    def __init__(self, optmap):
        self.replied_once = False  # This is used to handle the first prompt vs. the last
        self.optmap = optmap

    def editKey(self, status, args, out):
        result = None
        out.seek(0, 0)
        def mapDict(m, d):
            return(reduce(operator.getitem, m, d))
        if args == 'keyedit.prompt' and self.replied_once:
            result = 'quit'
        elif status == 'KEY_CONSIDERED':
            result = None
            self.replied_once = False
        elif status == 'GET_LINE':
            self.replied_once = True
            _ilist = args.split('.')
            result = mapDict(_ilist, self.optmap['prompts'])
            if not result:
                result = None
        return(result)

def _getEditPrompt(key, trust, cmd, uid = None):
    if not uid:
        uid = key.uids[0]
    # This mapping defines the default "answers" to the gpgme key editing.
    # https://www.apt-browse.org/browse/debian/wheezy/main/amd64/python-pyme/1:0.8.1-2/file/usr/share/doc/python-pyme/examples/t-edit.py
    # https://searchcode.com/codesearch/view/20535820/
    # https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS
    # You can get the prompt identifiers and status indicators without grokking the source
    # by first interactively performing the type of edit(s) you want to do with this command:
    # gpg --status-fd 2 --command-fd 2 --edit-key <KEY_ID>
    if trust >= gpg.constants.validity.FULL:  # For tsigning, it only prompts for two trust levels:
        _loctrust = 2  # "I trust fully"
    else:
        _loctrust = 1  # "I trust marginally"
    # TODO: make the trust depth configurable. 1 is probably the safest, but we try to guess here.
    # "Full" trust is a pretty big thing.
    if trust >= gpg.constants.validity.FULL:
        _locdepth = 2  # Allow +1 level of trust extension
    else:
        _locdepth = 1  # Only trust this key
    # The check level.
    # (0) I will not answer. (default)
    # (1) I have not checked at all.
    # (2) I have done casual checking.
    # (3) I have done very careful checking.
    # Since we're running this entirely non-interactively, we really should use 1.
    _chk_lvl = 1
    _map = {
        # Valid commands
        'cmds': ['trust', 'fpr', 'sign', 'tsign', 'lsign', 'nrsign', 'grip', 'list',
                 'uid', 'key', 'check', 'deluid', 'delkey', 'delsig', 'pref', 'showpref',
                 'revsig', 'enable', 'disable', 'showphoto', 'clean', 'minimize', 'save',
                 'quit'],
        # Prompts served by the interactive session, and a map of their responses.
        # It's expanded in the parent call, but the prompt is actually in the form of e.g.:
        # keyedit.save (we expand that to a list and use that list as a "path" in the below dict)
        # We *could* just use a flat dict of full prompt to constants, but this is a better visual segregation &
        # prevents unnecessary duplication.
        'prompts': {
            'edit_ownertrust': {'value': str(trust),  # Pulled at time of call
                                'set_ultimate': {'okay': 'yes'}},  # If confirming ultimate trust, we auto-answer yes
            'untrusted_key': {'override': 'yes'},  # We don't care if it's untrusted
            'pklist': {'user_id': {'enter': uid.uid}},  # Prompt for a user ID - can we use the full uid string? (tsign)
            'sign_uid': {'class': str(_chk_lvl),  # The certification/"check" level
                         'okay': 'yes'},  # Are you sure that you want to sign this key with your key..."
            'trustsig_prompt': {'trust_value': str(_loctrust),  # This requires some processing; see above
                                'trust_depth': str(_locdepth),  # The "depth" of the trust signature.
                                'trust_regexp': None},  # We can "Restrict" trust to certain domains if we wanted.
            'keyedit': {'prompt': cmd,  # Initiate trust editing (or whatever)
                        'save': {'okay': 'yes'}}}}  # Save if prompted
    return(_map)


class GPGHandler(object):
    def __init__(self, gnupg_homedir = None, key_id = None, keyservers = None):
        self.home = gnupg_homedir
        self.key_id = key_id
        self.keyservers = keyservers
        if self.home:
            self._prep_home()
        else:
            self._check_home()
        self.ctx = self.GetContext(home_dir = self.home)
        self._orig_kl_mode = self.ctx.get_keylist_mode()
        self.mykey = None
        self.subkey = None
        if self.key_id:
            self.mykey = self.ctx.get_key(self.key_id, secret = True)
            for s in self.mykey.subkeys:
                if s.can_sign:
                    self.subkey = s
                    self.ctx.signers = [self.mykey]
                    break

    def _check_home(self, home = None):
        if not home:
            home = self.home
        if not home:
            self.home = os.environ.get('GNUPGHOME', '~/.gnupg')
            home = self.home
        self._prep_home(home)
        return()

    def _prep_home(self, home = None):
        if not home:
            home = self.home
        if not home:
            self.home = os.environ.get('GNUPGHOME', '~/.gnupg')
        self.home = os.path.abspath(os.path.expanduser(self.home))
        if os.path.isdir(self.home):
            _exists = True
        else:
            _exists = False
        _uid = os.getuid()
        _gid = os.getgid()
        try:
            os.makedirs(self.home, exist_ok = True)
            os.chown(self.home, _uid, _gid)
            os.chmod(self.home, 0o700)
        except PermissionError:
            # It's alright; it's HOPEFULLY already created.
            if not _exists:
                raise PermissionError('We need a GnuPG home directory we can '
                                      'write to')
        # TODO: write gpg.conf, parse existing one and write changes if needed.
        # Should use SHA512 etc. See:
        # https://spin.atomicobject.com/2013/11/24/secure-gpg-keys-guide/
        # https://github.com/BetterCrypto/Applied-Crypto-Hardening/blob/master/src/configuration/GPG/GnuPG/gpg.conf
        # https://riseup.net/en/security/message-security/openpgp/best-practices
        # And explicitly set keyservers if present in params.
        return()

    def GetContext(self, **kwargs):
        ctx = gpg.Context(**kwargs)
        return(ctx)

    def CreateKey(self, name, algo, keysize, email = None, comment = None, passwd = None, key = None, expiry = None):
        userid = name
        userid += ' ({0})'.format(comment) if comment else ''
        userid += ' <{0}>'.format(email) if email else ''
        if not expiry:
            expires = False
        else:
            expires = True
        params = {'algorithm': _algmaps[algo].format(keysize = keysize),
                  'expires': expires,
                  'expires_in': (_epoch_helper(expiry) if expires else 0),
                  'sign': True,
                  'passphrase': passwd}
        if not key:
            self.mykey = self.ctx.get_key(self.ctx.create_key(userid, **params).fpr)
            self.subkey = self.mykey.subkeys[0]
        else:
            if not self.mykey:
                self.mykey = self.ctx.get_key(self.ctx.create_key(userid, **params).fpr)
            self.subkey = self.ctx.get_key(self.ctx.create_subkey(self.mykey, **params).fpr)
        self.ctx.signers = [self.subkey]
        return()

    def ListSigs(self, sig_data):
        key_ids = []
        # Currently as of May 13, 2018 there's no way using the GPGME API to do
        # the equivalent of the CLI's --list-packets. https://dev.gnupg.org/T3734
        # https://lists.gnupg.org/pipermail/gnupg-users/2018-January/059708.html
        # https://lists.gnupg.org/pipermail/gnupg-users/2018-January/059715.html
        # We use the "workaround" in:
        # https://lists.gnupg.org/pipermail/gnupg-users/2018-January/059711.html
        try:
            self.ctx.verify(sig_data)
        except gpg.errors.BadSignatures as sig_except:
            for line in [i.strip() for i in str(sig_except).splitlines()]:
                l = [i.strip() for i in line.split(':')]
                key_ids.append(l[0])
        return(key_ids)

    def GetSigs(self, data_in, sig_data = None, verify_keys = None):
        signers = []
        if verify_keys:
            # Raises gpg.errors.BadSignatures if any are invalid.
            # Unlike Verify below, this will raise an exception.
            signers = verify_keys
        if sig_data:
            # Detached sig
            sig = self.ctx.verify(data_in, signature = sig_data, verify = signers)
        else:
            # Cleartext? or "normal" signatures (embedded)
            sig = self.ctx.verify(data_in, verify = signers)
        return(sig)

    def GetKeysigs(self, pubkey):
        sigs = {}
        fpr = (pubkey if isinstance(pubkey, str) else pubkey.fpr)
        keys = list(self.ctx.keylist(fpr, mode = (gpg.constants.keylist.mode.LOCAL | gpg.constants.keylist.mode.SIGS)))
        for idx1, k in enumerate(keys):
            sigs[k.fpr] = {}
            for idx2, u in enumerate(k.uids):
                sigs[k.fpr][u.uid] = {}
                for idx3, sig in enumerate(u.signatures):
                    signer = getattr(sig, 'keyid')
                    sigs[k.fpr][u.uid][signer] = {}
                    for a in _keysig_attrs:
                        if a == 'keyid':
                            continue
                        sigs[k.fpr][u.uid][signer][a] = getattr(sig, a)
        return(sigs)

    def CheckSigs(self, sig, sigkeys = None):
        # sig should be a GetSigs result.
        is_valid = True
        # See self.CheckSigs().
        # https://www.gnupg.org/documentation/manuals/gpgme/Verify.html
        # https://github.com/micahflee/torbrowser-launcher/issues/262#issuecomment-284342876
        sig = sig[1]
        result = {}
        _keys = [s.fpr.upper() for s in sig.signatures]
        if sigkeys:
            if isinstance(sigkeys, str):
                sigkeys = [sigkeys.upper()]
            elif isinstance(sigkeys, list):
                _sigkeys = []
                for s in sigkeys[:]:
                    if isinstance(s, str):
                        _sigkeys.append(s.upper())
                    elif isinstance(s, gpgme._gpgme_key):
                        _sigkeys.append(s.fpr)
                    else:
                        continue
                sigkeys = _sigkeys
            elif isinstance(sigkeys, gpgme._gpgme_key):
                sigkeys = [sigkeys.fpr]
            else:
                raise ValueError('sigkeys must be a key fingerprint or a key object (or a list of those).')
            if not set(sigkeys).issubset(_keys):
                raise ValueError('All specified keys were not present in the signature.')
        for s in sig.signatures:
            fpr = getattr(s, 'fpr')
            result[fpr] = {}
            for a in _sig_attrs:
                if a == 'fpr':
                    continue
                result[fpr][a] = getattr(s, a)
            # Now we do some logic to determine if the sig is "valid".
            # Note that we can get confidence level by &'ing "validity" attr against gpg.constants.validity.*
            # Or just doing a <, >, <=, etc. operation since it's a sequential list of constants levels, not bitwise.
            # For now, we just check if it's valid or not, not "how valid" it is (how much we can trust it).
            _status = s.summary
            if not _get_sig_isgood(_status):
                result[fpr]['valid'] = False
            else:
                result[fpr]['valid'] = True
        if sigkeys:
            for k in sigkeys:
                if (k not in result) or (not result[k]['valid']):
                    is_valid = False
                    break
        else:  # is_valid is satisfied by at LEAST one valid sig.
            is_valid = any([v['valid'] for v in result.values()])
        return(is_valid, result)

    def Sign(self, data_in, ascii = True, mode = 'detached', notations = None):
        # notations is a list of dicts via notation format:
        # {<namespace>: {'value': 'some string', 'flags': BITWISE_OR_FLAGS}}
        # See RFC 4880 § 5.2.3.16 for valid user namespace format.
        if mode.startswith('d'):
            mode = gpg.constants.SIG_MODE_DETACH
        elif mode.startswith('c'):
            mode = gpg.constants.SIG_MODE_CLEAR
        elif mode.startswith('n'):
            mode = gpg.constants.SIG_MODE_NORMAL
        self.ctx.armor = ascii
        if not isinstance(data_in, bytes):
            if isinstance(data_in, str):
                data_in = data_in.encode('utf-8')
            else:
                # We COULD try serializing to JSON here, or converting to a pickle object,
                # or testing for other classes, etc. But we don't.
                # TODO?
                data_in = repr(data_in).encode('utf-8')
        data_in = gpg.Data(data_in)
        if notations:
            for n in notations:
                if not utils.valid().gpgsigNotation(n):
                    raise ValueError('Malformatted notation: {0}'.format(n))
                for ns in n:
                    self.ctx.sig_notation_add(ns, n[ns]['value'], n[ns]['flags'])
        # data_in *always* must be a bytes (or bytes-like?) object.
        # It will *always* return a bytes object.
        sig = self.ctx.sign(data_in, mode = mode)
        # And we need to clear the sig notations, otherwise they'll apply to the next signature this context makes.
        self.ctx.sig_notation_clear()
        return(sig)

    def ImportPubkey(self, pubkey):
        fpr = (pubkey if isinstance(pubkey, str) else pubkey.fpr)
        try:
            self.ctx.get_key(fpr)
            return()  # already imported
        except gpg.errors.KeyNotFound:
            pass
        _dflt_klm = self.ctx.get_keylist_mode()
        self.ctx.set_keylist_mode(gpg.constants.keylist.mode.EXTERN)
        if isinstance(pubkey, gpgme._gpgme_key):
            self.ctx.op_import_keys([pubkey])
        elif isinstance(pubkey, str):
            if not utils.valid().gpgkeyID(pubkey):
                raise ValueError('{0} is not a valid key or fingerprint'.format(pubkey))
            pubkey = self.ctx.get_key(fpr)
            self.ctx.op_import_keys([pubkey])
        self.ctx.set_keylist_mode(_dflt_klm)
        self.SignKey(pubkey)
        return()

    def ImportPubkeyFromFile(self, pubkey_data):
        _fpath = os.path.abspath(os.path.expanduser(pubkey_data))
        if os.path.isfile(_fpath):
            with open(_fpath, 'rb') as f:
                k = self.ctx.key_import(f.read())
        else:
            k = self.ctx.key_import(pubkey_data)
        pubkey = self.ctx.get_key(k)
        self.SignKey(pubkey)
        return()

    def SignKey(self, pubkey, local = False, notations = None):
        # notations is a list of dicts via notation format:
        # {<namespace>: {'value': 'some string', 'flags': BITWISE_OR_FLAGS}}
        # See RFC 4880 § 5.2.3.16 for valid user namespace format.
        if isinstance(pubkey, gpgme._gpgme_key):
            pass
        elif isinstance(pubkey, str):
            if not utils.valid().gpgkeyID(pubkey):
                raise ValueError('{0} is not a valid fingerprint'.format(pubkey))
            else:
                pubkey = self.ctx.get_key(pubkey)
        if notations:
            for n in notations:
                if not utils.valid().gpgsigNotation(n):
                    raise ValueError('Malformatted notation: {0}'.format(n))
                for ns in n:
                    self.ctx.sig_notation_add(ns, n[ns]['value'], n[ns]['flags'])
        self.ctx.key_sign(pubkey, local = local)
        self.TrustKey(pubkey)
        # And we need to clear the sig notations, otherwise they'll apply to the next signature this context makes.
        self.ctx.sig_notation_clear()
        return()

    def TrustKey(self, pubkey, trust = gpg.constants.validity.FULL):
        # We use full as the default because signatures aren't considered valid otherwise.
        # TODO: we need a way of maybe reverting/rolling back any changes we do?
        output = gpg.Data()
        _map = _getEditPrompt(pubkey, trust, 'trust')
        self.ctx.interact(pubkey, _KeyEditor(_map).editKey, sink = output, fnc_value = output)
        output.seek(0, 0)
        return()

    def ExportPubkey(self, fpr, ascii = True, sigs = False):
        orig_armor = self.ctx.armor
        self.ctx.armor = ascii
        if sigs:
            export_mode = 0
        else:
            export_mode = gpg.constants.EXPORT_MODE_MINIMAL  # default is 0; minimal strips signatures
        kb = gpg.Data()
        self.ctx.op_export_keys([self.ctx.get_key(fpr)], export_mode, kb)
        kb.seek(0, 0)
        self.ctx.armor = orig_armor
        return(kb.read())

    def DeleteKey(self, pubkey):
        if isinstance(pubkey, gpgme._gpgme_key):
            pass
        elif isinstance(pubkey, str):
            if not utils.valid().gpgkeyID(pubkey):
                raise ValueError('{0} is not a valid fingerprint'.format(pubkey))
            else:
                pubkey = self.ctx.get_key(pubkey)
        self.ctx.op_delete(pubkey, False)
        return()

    def Verify(self, sig_data, data):
        # This is a more "flat" version of CheckSigs.
        # First we need to parse the sig(s) and import the key(s) to our keyring.
        signers = self.ListSigs(sig_data)
        for signer in signers:
            self.ImportPubkey(signer)
        try:
            self.ctx.verify(data, signature = sig_data, verify = signers)
            return(True)
        except gpg.errors.BadSignatures as err:
            return(False)
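A hypothetical usage sketch of the GPGHandler API above (not from the repo; the homedir, uid, and payload are illustrative only):

import GPG  # the module above; BDisk uses flat local imports

# Assumes a writable homedir; GPGHandler creates it and chmods it to 0700.
handler = GPG.GPGHandler(gnupg_homedir = '/tmp/bdisk-test.gnupg')
# 'rsa' is looked up in _algmaps and expanded to 'rsa4096'.
handler.CreateKey('BDisk Test', 'rsa', 4096, email = 'test@example.com')
result = handler.Sign(b'release artifact bytes', mode = 'detached')
# Depending on the gpg-module version, ctx.sign() may return bytes or a
# (bytes, sign_result) tuple; normalize before verifying.
sig = result[0] if isinstance(result, tuple) else result
# Verify() imports any signing keys it finds in the sig, then returns True/False.
print(handler.Verify(sig, b'release artifact bytes'))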
bdisk/SSL.py (Normal file, 12 lines)

@@ -0,0 +1,12 @@
import OpenSSL
# https://cryptography.io/en/latest/x509/reference/#cryptography.x509.CertificateBuilder.sign
# migrate old functions of bSSL to use cryptography
# but still waiting on their recipes.
# https://cryptography.io/en/latest/x509/tutorial/
#import OpenSSL
#k = OpenSSL.crypto.PKey()
#k.generate_key(OpenSSL.crypto.TYPE_RSA, 4096)
#x = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,
#                                   k,
#                                   cipher = 'aes256',
#                                   passphrase = 'test')
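The migration the comments above describe would look roughly like this with the cryptography package (a sketch assuming a recent cryptography release where the backend argument is optional; the passphrase mirrors the commented-out pyOpenSSL snippet and is illustrative):

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

# Generate a 4096-bit RSA key, then serialize it encrypted with whatever
# cipher BestAvailableEncryption selects (an AES-based one in current
# releases), matching dump_privatekey(..., cipher = 'aes256',
# passphrase = 'test') above.
key = rsa.generate_private_key(public_exponent = 65537, key_size = 4096)
pem = key.private_bytes(
    encoding = serialization.Encoding.PEM,
    format = serialization.PrivateFormat.TraditionalOpenSSL,
    encryption_algorithm = serialization.BestAvailableEncryption(b'test'))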
bdisk/UEFI.py (Normal file, 3 lines)

@@ -0,0 +1,3 @@
import os
import shutil
import subprocess
bdisk/__init__.py (Normal file, 30 lines)

@@ -0,0 +1,30 @@
import os
import platform
import sys

"""
BDisk - An easy liveCD creator built in python.
"""

# BDisk is only supported on Python 3.4 and up.
if sys.version_info.major != 3:
    raise RuntimeError('BDisk is only supported on Python 3')
elif sys.version_info.minor <= 3:
    raise RuntimeError('BDisk is only supported on Python 3.4 and up')

# BDisk is only supported on GNU/Linux. There *might* be a way to make it work
# with certain *BSDs, but if that's possible at all it'll have to come at a
# later date. Patches welcome.
# I'd have to find out how to manipulate/create FAT filesystems and such as
# well.
# I'd be curious to see if I can get it working in Cygwin or WSL:
# https://docs.microsoft.com/en-us/windows/wsl/install-win10
# And maybe, if we're really pie-in-the-sky, macOS's Fink/Homebrew/Macports.
if platform.system() != 'Linux':
    raise RuntimeError('BDisk is currently only supported on GNU/Linux')

# CURRENTLY, we require root user because of the chroots and such. However,
# there should be creative ways to do this with cgroups as a regular user in
# the future. Patches welcome (or at least some input).
if os.geteuid() != 0:
    raise PermissionError('BDisk currently requires root privileges')
bdisk/bGPG.py (228 lines)

@@ -1,228 +0,0 @@
import os
from io import BytesIO
import subprocess
import datetime
import jinja2
import gpgme
import psutil


def genGPG(conf):
    # https://media.readthedocs.org/pdf/pygpgme/latest/pygpgme.pdf
    build = conf['build']
    dlpath = build['dlpath']
    bdisk = conf['bdisk']
    gpghome = conf['gpg']['mygpghome']
    distkeys = []
    gpgkeyserver = []
    for a in conf['build']['arch']:
        keysrv = conf['src'][a]['gpgkeyserver']
        distkey = conf['src'][a]['gpgkey']
        if keysrv and (keysrv not in gpgkeyserver):
            gpgkeyserver.append(keysrv)
        if distkey and (distkey not in distkeys):
            distkeys.append(distkey)
    templates_dir = '{0}/extra/templates'.format(build['basedir'])
    mykey = False
    pkeys = []
    killStaleAgent(conf)
    if conf['gpg']['mygpgkey'] != '':
        mykey = conf['gpg']['mygpgkey']
        if gpghome == '':
            # Let's try the default.
            gpghome = '{0}/.gnupg'.format(os.path.expanduser("~"))
    else:
        # No key ID was specified.
        if gpghome == '':
            # We'll generate a key if we can't find one here.
            gpghome = build['dlpath'] + '/.gnupg'
    killStaleAgent(conf)
    os.environ['GNUPGHOME'] = gpghome
    gpg = gpgme.Context()
    # do we need to add a keyserver?
    if len(gpgkeyserver) != 0:
        dirmgr = '{0}/dirmngr.conf'.format(gpghome)
        for s in gpgkeyserver:
            if os.path.isfile(dirmgr):
                with open(dirmgr, 'r+') as f:
                    findme = any(s in line for line in f)
                    if not findme:
                        f.seek(0, os.SEEK_END)
                        f.write("\n# Added by {0}.\nkeyserver {1}\n".format(
                            bdisk['pname'],
                            s))
    if mykey:
        try:
            pkeys.append(gpg.get_key(mykey, True))
        except:
            exit('{0}: ERROR: You specified using {1} but we have no secret key for that ID!'.format(
                datetime.datetime.now(),
                mykey))
    else:
        for key in gpg.keylist(None, True):
            if key.can_sign:
                pkeys.append(key)
                break
    if len(pkeys) == 0:
        print("{0}: [GPG] Generating a GPG key...".format(datetime.datetime.now()))
        loader = jinja2.FileSystemLoader(templates_dir)
        env = jinja2.Environment(loader = loader)
        tpl = env.get_template('GPG.j2')
        tpl_out = tpl.render(build = build, bdisk = bdisk)
        privkey = gpg.get_key(gpg.genkey(tpl_out).fpr, True)
        pkeys.append(privkey)
        # do we need to add a keyserver? this is for the freshly-generated GNUPGHOME
        if len(gpgkeyserver) != 0:
            dirmgr = '{0}/dirmngr.conf'.format(gpghome)
            for s in gpgkeyserver:
                with open(dirmgr, 'r+') as f:
                    findme = any(s in line for line in f)
                    if not findme:
                        f.seek(0, os.SEEK_END)
                        f.write("\n# Added by {0}.\nkeyserver {1}\n".format(
                            bdisk['pname'],
                            s))
    gpg.signers = pkeys
    # Now we try to find and add the key for the base image.
    gpg.keylist_mode = gpgme.KEYLIST_MODE_EXTERN  # remote (keyserver)
    if len(distkeys) > 0:  # testing
        for k in distkeys:
            key = gpg.get_key(k)
            importkey = key.subkeys[0].fpr
            gpg.keylist_mode = gpgme.KEYLIST_MODE_LOCAL  # local keyring (default)
            DEVNULL = open(os.devnull, 'w')
            print('{0}: [GPG] Importing {1} and signing it for verification purposes...'.format(
                datetime.datetime.now(),
                distkey))
            cmd = ['/usr/bin/gpg',
                   '--recv-keys',
                   '--batch',
                   '--yes',
                   '0x{0}'.format(importkey)]
            subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
            sigkeys = []
            for i in gpg.get_key(importkey).subkeys:
                sigkeys.append(i.fpr)
            cmd = ['/usr/bin/gpg',
                   '--batch',
                   '--yes',
                   '--lsign-key',
                   '0x{0}'.format(importkey)]
            subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
    # We need to expose this key to the chroots, too, so we need to export it.
    with open('{0}/gpgkey.pub'.format(dlpath), 'wb') as f:
        gpg.export(pkeys[0].subkeys[0].keyid, f)
    return(gpg)


def killStaleAgent(conf):
    # Kill off any stale GPG agents running.
    # Probably not even needed, but good to have.
    chrootdir = conf['build']['chrootdir']
    gpgpath = conf['gpg']['mygpghome']
    procs = psutil.process_iter()
    plst = []
    for p in procs:
        if (p.name() in ('gpg-agent', 'dirmngr') and p.uids()[0] == os.getuid()):
            pd = psutil.Process(p.pid).as_dict()
            for d in (chrootdir, gpgpath):
                if pd['cwd'].startswith('{0}'.format(d)):
                    plst.append(p.pid)
    if len(plst) >= 1:
        for p in plst:
            psutil.Process(p).terminate()


def signIMG(path, conf):
    if conf['build']['sign']:
        # Do we want to kill off any stale gpg-agents? (So we spawn a new one)
        # Requires further testing.
        #killStaleAgent()
        gpg = conf['gpgobj']
        print('{0}: [GPG] Signing {1}...'.format(
            datetime.datetime.now(),
            path))
        # May not be necessary; further testing necessary
        #if os.getenv('GPG_AGENT_INFO'):
        #    del os.environ['GPG_AGENT_INFO']
        gpg = conf['gpgobj']
        # ASCII-armor (.asc)
        gpg.armor = True
        data_in = open(path, 'rb')
        sigbuf = BytesIO()
        sig = gpg.sign(data_in, sigbuf, gpgme.SIG_MODE_DETACH)
        _ = sigbuf.seek(0)
        _ = data_in.seek(0)
        data_in.close()
        with open('{0}.asc'.format(path), 'wb') as f:
            f.write(sigbuf.read())
        print('{0}: [GPG] Wrote {1}.asc (ASCII-armored signature).'.format(
            datetime.datetime.now(),
            path))
        # Binary signature (.sig)
        gpg.armor = False
        data_in = open(path, 'rb')
        sigbuf = BytesIO()
        sig = gpg.sign(data_in, sigbuf, gpgme.SIG_MODE_DETACH)
        _ = sigbuf.seek(0)
        _ = data_in.seek(0)
        data_in.close()
        with open('{0}.sig'.format(path), 'wb') as f:
            f.write(sigbuf.read())
        print('{0}: [GPG] Wrote {1}.sig (binary signature).'.format(
            datetime.datetime.now(),
            path))


def gpgVerify(sigfile, datafile, conf):
    gpg = conf['gpgobj']
    fullkeys = []
    print('{0}: [GPG] Verifying {1} with {2}...'.format(
        datetime.datetime.now(),
        datafile,
        sigfile))
    keylst = gpg.keylist()
    for k in keylst:
        fullkeys.append(k.subkeys[0].fpr)
    with open(sigfile, 'rb') as s:
        with open(datafile, 'rb') as f:
            sig = gpg.verify(s, f, None)
    for x in sig:
        if x.validity <= 1:
            if not x.validity_reason:
                reason = 'we require a signature trust of 2 or higher'
            else:
                reason = x.validity_reason
            print('{0}: [GPG] Key {1} failed to verify: {2}'.format(
                datetime.datetime.now(),
                x.fpr,
                reason))
    verified = False
    skeys = []
    for k in sig:
        skeys.append(k.fpr)
        if k.fpr in fullkeys:
            verified = True
            break
        else:
            pass
    if verified:
        print('{0}: [GPG] {1} verified (success).'.format(
            datetime.datetime.now(),
            datafile))
    else:
        print('{0}: [GPG] {1} failed verification!'.format(
            datetime.datetime.now(),
            datafile))
    return(verified)


def delTempKeys(conf):
    # Create a config option to delete these.
    # It's handy to keep these keys, but I'd understand if
    # people didn't want to use them.
    gpg = conf['gpgobj']
    if conf['gpg']:
        keys = []
        if conf['gpgkey'] != '':
            keys.append(gpg.get_key(conf['gpgkey']))
        if conf['mygpghome'] == '':
            keys.append(gpg.get_key(None, True))  # this is safe; we generated our own
        for k in keys:
            gpg.delete(k)
    killStaleAgent(conf)
bdisk/bSSL.py (196 lines)

@@ -1,196 +0,0 @@
import OpenSSL
import os
import shutil
import datetime
import re


def verifyCert(cert, key, CA = None):
    # Verify a given certificate against a certificate.
    # Optionally verify against a CA certificate as well (Hopefully. If/when PyOpenSSL ever supports it.)
    chk = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
    chk.use_privatekey(key)
    chk.use_certificate(cert)
    try:
        chk.check_privatekey()
    except OpenSSL.SSL.Error:
        return(False)
        exit(("{0}: {1} does not match {2}!".format(datetime.datetime.now(), key, cert)))
    else:
        print("{0}: [SSL] Verified {1} against {2} successfully.".format(datetime.datetime.now(), key, cert))
        return(True)
    # This is disabled because there doesn't seem to currently be any way
    # to actually verify certificates against a given CA.
    #if CA:
    #    try:
    #        magic stuff here

def sslCAKey(conf):
    # TODO: use path from conf, even if it doesn't exist?
    # if it does, read it into a pkey object
    keyfile = conf['ipxe']['ssl_cakey']
    if os.path.isfile(keyfile):
        try:
            key = OpenSSL.crypto.load_privatekey(
                OpenSSL.crypto.FILETYPE_PEM,
                open(keyfile).read())
        except:
            exit('{0}: ERROR: It seems that {1} is not a proper PEM-encoded SSL key.'.format(
                datetime.datetime.now(),
                keyfile))
    else:
        key = OpenSSL.crypto.PKey()
        print("{0}: [SSL] Generating SSL CA key...".format(datetime.datetime.now()))
        key.generate_key(OpenSSL.crypto.TYPE_RSA, 4096)
        with open(keyfile, 'wb') as f:
            f.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key))
    return(key)

def sslCA(conf, key = None):
    # NOTE: 'key' is a pkey OBJECT, not a file.
    keyfile = conf['ipxe']['ssl_cakey']
    crtfile = conf['ipxe']['ssl_ca']
    if not key:
        if os.path.isfile(keyfile):
            try:
                key = OpenSSL.crypto.load_privatekey(
                    OpenSSL.crypto.FILETYPE_PEM,
                    open(keyfile).read())
            except:
                exit('{0}: ERROR: It seems that {1} is not a proper PEM-encoded SSL key.'.format(
                    datetime.datetime.now(),
                    keyfile))
        else:
            exit('{0}: ERROR: We need a key to generate a CA certificate!'.format(
                datetime.datetime.now()))
    if os.path.isfile(crtfile):
        try:
            ca = OpenSSL.crypto.load_certificate(
                OpenSSL.crypto.FILETYPE_PEM,
                open(crtfile).read())
        except:
            exit('{0}: ERROR: It seems that {1} is not a proper PEM-encoded SSL certificate.'.format(
                datetime.datetime.now(),
                crtfile))
    else:
        domain = (re.sub('^(https?|ftp)://([a-z0-9.-]+)/?.*$', '\g<2>',
                         conf['ipxe']['uri'],
                         flags=re.IGNORECASE)).lower()
        # http://www.pyopenssl.org/en/stable/api/crypto.html#pkey-objects
        # http://docs.ganeti.org/ganeti/2.14/html/design-x509-ca.html
        ca = OpenSSL.crypto.X509()
        ca.set_version(3)
        ca.set_serial_number(1)
        #ca.get_subject().CN = domain
        ca.get_subject().CN = '{0} CA'.format(conf['bdisk']['name'])
        ca.gmtime_adj_notBefore(0)
        # valid for ROUGHLY 10 years. years(ish) * days * hours * mins * secs.
        # the parameter is in seconds, which is why we need to multiply them all together.
        ca.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
        ca.set_issuer(ca.get_subject())
        ca.set_pubkey(key)
        ca.add_extensions([
            OpenSSL.crypto.X509Extension(b"basicConstraints",
                                         True,
                                         b"CA:TRUE, pathlen:0"),
            OpenSSL.crypto.X509Extension(b"keyUsage",
                                         True,
                                         b"keyCertSign, cRLSign"),
            OpenSSL.crypto.X509Extension(b"subjectKeyIdentifier",
                                         False,
                                         b"hash",
                                         subject = ca),])
        ca.sign(key, "sha512")
        with open(crtfile, 'wb') as f:
            f.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, ca))
    return(ca)

def sslCKey(conf):
    keyfile = conf['ipxe']['ssl_key']
    if os.path.isfile(keyfile):
        try:
            key = OpenSSL.crypto.load_privatekey(
                OpenSSL.crypto.FILETYPE_PEM,
                open(keyfile).read())
        except:
            exit('{0}: ERROR: It seems that {1} is not a proper PEM-encoded SSL key.'.format(
                datetime.datetime.now(),
                keyfile))
    else:
        key = OpenSSL.crypto.PKey()
        print("{0}: [SSL] Generating SSL Client key...".format(datetime.datetime.now()))
        key.generate_key(OpenSSL.crypto.TYPE_RSA, 4096)
        with open(keyfile, 'wb') as f:
            f.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key))
    return(key)

def sslCSR(conf, key = None):
    # NOTE: 'key' is a pkey OBJECT, not a file.
    keyfile = conf['ipxe']['ssl_key']
    crtfile = conf['ipxe']['ssl_crt']
    if not key:
        if os.path.isfile(keyfile):
            try:
                key = OpenSSL.crypto.load_privatekey(
                    OpenSSL.crypto.FILETYPE_PEM,
                    open(keyfile).read())
            except:
                exit('{0}: ERROR: It seems that {1} is not a proper PEM-encoded SSL key.'.format(
                    datetime.datetime.now(),
                    keyfile))
        else:
            exit('{0}: ERROR: We need a key to generate a CSR!'.format(
                datetime.datetime.now()))
    domain = (re.sub('^(https?|ftp)://([a-z0-9.-]+)/?.*$', '\g<2>',
                     conf['ipxe']['uri'],
                     flags=re.IGNORECASE)).lower()
    csr = OpenSSL.crypto.X509Req()
    csr.get_subject().CN = domain
    #req.get_subject().countryName = 'xxx'
    #req.get_subject().stateOrProvinceName = 'xxx'
    #req.get_subject().localityName = 'xxx'
    #req.get_subject().organizationName = 'xxx'
    #req.get_subject().organizationalUnitName = 'xxx'
    csr.set_pubkey(key)
    csr.sign(key, "sha512")
    with open('/tmp/main.csr', 'wb') as f:
        f.write(OpenSSL.crypto.dump_certificate_request(OpenSSL.crypto.FILETYPE_PEM, csr))
    return(csr)

def sslSign(conf, ca, key, csr):
    #ca_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, ca)
    #ca_key = OpenSSL.crypto.load_privatekey(key)
    #req = OpenSSL.crypto.load_certificate_request(csr)
    csr = OpenSSL.crypto.load_certificate_request(OpenSSL.crypto.FILETYPE_PEM,
                                                  open("/tmp/main.csr").read())
    cert = OpenSSL.crypto.X509()
    cert.set_subject(csr.get_subject())
    cert.set_serial_number(1)
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(24 * 60 * 60)
    cert.set_issuer(ca.get_subject())
    cert.set_pubkey(csr.get_pubkey())
    #cert.set_pubkey(ca.get_pubkey())
    cert.sign(key, "sha512")
    with open(conf['ipxe']['ssl_crt'], 'wb') as f:
        f.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert))
    return(cert)

def sslPKI(conf):
    # run checks for everything, gen what's missing
    ssldir = conf['ipxe']['ssldir']
    os.makedirs(ssldir, exist_ok = True)
    certfile = conf['ipxe']['ssl_crt']
    key = sslCAKey(conf)
    ca = sslCA(conf, key = key)
    ckey = sslCKey(conf)
    if os.path.isfile(certfile):
        cert = OpenSSL.crypto.load_certificate(
            OpenSSL.crypto.FILETYPE_PEM,
            open(certfile).read())
        if not verifyCert(cert, ckey):
            csr = sslCSR(conf, ckey)
            cert = sslSign(conf, ca, key, csr)
    else:
        csr = sslCSR(conf, ckey)
        cert = sslSign(conf, ca, key, csr)
    return(cert)
bdisk/bchroot.py (156 lines)

@@ -1,156 +0,0 @@
import os
import sys
import psutil
import subprocess
import datetime
import tarfile
import humanize
import shutil


def chroot(chrootdir, chroot_hostname, cmd = '/root/pre-build.sh'):
    # MOUNT the chroot
    mountpoints = psutil.disk_partitions(all = True)
    mounts = []
    for m in mountpoints:
        mounts.append(m.mountpoint)
    cmounts = {}
    for m in ('chroot', 'resolv', 'proc', 'sys', 'efi', 'dev', 'pts', 'shm', 'run', 'tmp'):
        cmounts[m] = None
    # chroot (bind mount... onto itself. it's so stupid, i know. see https://bugs.archlinux.org/task/46169)
    if chrootdir not in mounts:
        cmounts['chroot'] = ['/bin/mount',
                             '--bind',
                             chrootdir,
                             chrootdir]
    # resolv
    if (chrootdir + '/etc/resolv.conf') not in mounts:
        cmounts['resolv'] = ['/bin/mount',
                             '--bind',
                             '-o', 'ro',
                             '/etc/resolv.conf',
                             chrootdir + '/etc/resolv.conf']
    # proc
    if (chrootdir + '/proc') not in mounts:
        cmounts['proc'] = ['/bin/mount',
                           '-t', 'proc',
                           '-o', 'nosuid,noexec,nodev',
                           'proc',
                           chrootdir + '/proc']
    # sys
    if (chrootdir + '/sys') not in mounts:
        cmounts['sys'] = ['/bin/mount',
                          '-t', 'sysfs',
                          '-o', 'nosuid,noexec,nodev,ro',
                          'sys',
                          chrootdir + '/sys']
    # efi (if it exists on the host)
    if '/sys/firmware/efi/efivars' in mounts:
        if (chrootdir + '/sys/firmware/efi/efivars') not in mounts:
            cmounts['efi'] = ['/bin/mount',
                              '-t', 'efivarfs',
                              '-o', 'nosuid,noexec,nodev',
                              'efivarfs',
                              chrootdir + '/sys/firmware/efi/efivars']
    # dev
    if (chrootdir + '/dev') not in mounts:
        cmounts['dev'] = ['/bin/mount',
                          '-t', 'devtmpfs',
                          '-o', 'mode=0755,nosuid',
                          'udev',
                          chrootdir + '/dev']
    # pts
    if (chrootdir + '/dev/pts') not in mounts:
        cmounts['pts'] = ['/bin/mount',
                          '-t', 'devpts',
                          '-o', 'mode=0620,gid=5,nosuid,noexec',
                          'devpts',
                          chrootdir + '/dev/pts']
    # shm (if it exists on the host)
    if '/dev/shm' in mounts:
        if (chrootdir + '/dev/shm') not in mounts:
            cmounts['shm'] = ['/bin/mount',
                              '-t', 'tmpfs',
                              '-o', 'mode=1777,nosuid,nodev',
                              'shm',
                              chrootdir + '/dev/shm']
    # run (if it exists on the host)
    if '/run' in mounts:
        if (chrootdir + '/run') not in mounts:
            cmounts['run'] = ['/bin/mount',
                              '-t', 'tmpfs',
                              '-o', 'nosuid,nodev,mode=0755',
                              'run',
                              chrootdir + '/run']
    # tmp (if it exists on the host)
    if '/tmp' in mounts:
        if (chrootdir + '/tmp') not in mounts:
            cmounts['tmp'] = ['/bin/mount',
                              '-t', 'tmpfs',
                              '-o', 'mode=1777,strictatime,nodev,nosuid',
                              'tmp',
                              chrootdir + '/tmp']
    # the order we mount here is VERY IMPORTANT. Sure, we could do "for m in cmounts:", but dicts aren't ordered until python 3.6
    # and this is SO important it's best that we be explicit as possible while we're still in alpha/beta stage. TODO?
    for m in ('chroot', 'resolv', 'proc', 'sys', 'efi', 'dev', 'pts', 'shm', 'run', 'tmp'):
        if cmounts[m]:
            subprocess.call(cmounts[m])
    print("{0}: [CHROOT] Running '{1}' ({2}). PROGRESS: tail -f {2}/var/log/chroot_install.log ...".format(
        datetime.datetime.now(),
        cmd,
        chrootdir))
    real_root = os.open("/", os.O_RDONLY)
    os.chroot(chrootdir)
    os.system('/root/pre-build.sh')
    os.fchdir(real_root)
    os.chroot('.')
    os.close(real_root)
    if not os.path.isfile('{0}/sbin/init'.format(chrootdir)):
        os.symlink('../lib/systemd/systemd', '{0}/sbin/init'.format(chrootdir))
    return(chrootdir)

def chrootUnmount(chrootdir):
    subprocess.call(['umount', '-lR', chrootdir])

def chrootTrim(build):
    chrootdir = build['chrootdir']
    arch = build['arch']
    for a in arch:
        # Compress the pacman and apacman caches.
        for i in ('pacman', 'apacman'):
            shutil.rmtree('{0}/root.{1}/var/cache/{2}'.format(chrootdir, a, i))
            os.makedirs('{0}/root.{1}/usr/local/{2}'.format(chrootdir, a, i), exist_ok = True)
            tarball = '{0}/root.{1}/usr/local/{2}/{2}.db.tar.xz'.format(chrootdir, a, i)
            dbdir = '{0}/root.{1}/var/lib/{2}/local'.format(chrootdir, a, i)
            if os.path.isdir(dbdir):
                print("{0}: [CHROOT] Compressing {1}'s cache ({2})...".format(
                    datetime.datetime.now(),
                    chrootdir + '/root.' + a,
                    i))
                if os.path.isfile(tarball):
                    os.remove(tarball)
                with tarfile.open(name = tarball, mode = 'w:xz') as tar:  # if this complains, use x:xz instead
                    tar.add(dbdir, arcname = os.path.basename(dbdir))
                shutil.rmtree(dbdir, ignore_errors = True)
                print("{0}: [CHROOT] Created {1} ({2}). {3} cleared.".format(
                    datetime.datetime.now(),
                    tarball,
                    humanize.naturalsize(
                        os.path.getsize(tarball)),
                    dbdir))
        #for d in ('etc/pacman.d/gnupg', 'var/empty/.gnupg'):  # actually, we should probably keep these.
        # they don't take much space, and it's a PITA to pacman-key --init && pacman-key --populate again on boot.
        #    if os.path.isdir('{0}/root.{1}/{2}'.format(chrootdir, a, d)):
        #        shutil.rmtree('{0}/root.{1}/{2}'.format(chrootdir, a, d))
        # TODO: move the self-cleanup in pre-build.sh to here.
        delme = [#'/root/.gnupg',  # see above
                 '/root/.bash_history',
                 #'/var/log/chroot_install.log',  # disable for now. maybe always disable if debug is enabled? TODO.
                 '/.git',
                 '/root/.viminfo']
        for i in delme:
            fullpath = '{0}/root.{1}{2}'.format(chrootdir, a, i)
            if os.path.isfile(fullpath):
                os.remove(fullpath)
            elif os.path.isdir(fullpath):
                shutil.rmtree(fullpath, ignore_errors = True)
@ -1,71 +0,0 @@
#!/bin/env python3

import argparse
import host
import prep
import bchroot
import build
import datetime
import bSSL
import ipxe
import bsync
import bGPG
import os


def bdisk(args):
    # we also need to figure out how to implement "mentos" (old bdisk) like functionality, letting us reuse an
    # existing chroot install if possible to save time for future builds.
    # if not, though, it's no big deal.
    if os.getuid() != 0:
        exit('{0}: ERROR: BDisk *must* be run as the root user or with sudo!'.format(datetime.datetime.now()))
    print('{0}: Starting.'.format(datetime.datetime.now()))
    conf = host.parseConfig(host.getConfig(conf_file = args['buildini']))[1]
    prep.dirChk(conf)
    conf['gpgobj'] = bGPG.genGPG(conf)
    prep.buildChroot(conf, keep = False)
    prep.prepChroot(conf)
    arch = conf['build']['arch']
    bGPG.killStaleAgent(conf)
    for a in arch:
        bchroot.chroot(conf['build']['chrootdir'] + '/root.' + a, 'bdisk.square-r00t.net')
        bchroot.chrootUnmount(conf['build']['chrootdir'] + '/root.' + a)
    prep.postChroot(conf)
    bchroot.chrootTrim(conf['build'])
    build.genImg(conf)
    build.genUEFI(conf['build'], conf['bdisk'])
    fulliso = build.genISO(conf)
    bGPG.signIMG(fulliso['Main']['file'], conf)
    build.displayStats(fulliso)
    if conf['build']['ipxe']:
        bSSL.sslPKI(conf)
        ipxe.buildIPXE(conf)
        iso = ipxe.genISO(conf)
        if iso:
            for x in iso.keys():
                if x != 'name':
                    path = iso[x]['file']
                    bGPG.signIMG(path, conf)
            build.displayStats(iso)
    bsync.http(conf)
    bsync.tftp(conf)
    bsync.git(conf)
    bsync.rsync(conf)
    print('{0}: Finish.'.format(datetime.datetime.now()))

def parseArgs():
    args = argparse.ArgumentParser(description = 'BDisk - a tool for building live/rescue media.',
                                   epilog = 'brent s. || 2017 || https://bdisk.square-r00t.net')
    args.add_argument('buildini',
                      metavar = '/path/to/build.ini',
                      default = '/etc/bdisk/build.ini',
                      nargs = '?',
                      help = 'The full/absolute path to the build.ini to use for this run. The default is /etc/bdisk/build.ini, but see https://bdisk.square-r00t.net/#the_code_build_ini_code_file.')
    return(args)

def main():
    args = vars(parseArgs().parse_args())
    bdisk(args)

if __name__ == '__main__':
    main()
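
# Editor's note (illustration, not original code): given the argparse definition
# above, this removed entry point would have been invoked roughly as:
#   sudo python3 bdisk.py /path/to/build.ini
# with the positional argument defaulting to /etc/bdisk/build.ini when omitted.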
933
bdisk/bdisk.xsd
Normal file
@ -0,0 +1,933 @@
<?xml version="1.0" encoding="UTF-8" ?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
           targetNamespace="http://bdisk.square-r00t.net/"
           xmlns="http://bdisk.square-r00t.net/"
           elementFormDefault="qualified">

    <!-- CUSTOM TYPES -->
    <!-- t_btag_uri: a string that will allow btags (xpath or variable only) or a URI string (but NOT a URN). -->
    <!-- We can't use xs:anyURI because it is too loose (allows things like relative paths, etc.) -->
    <!-- but ALSO too restrictive in that btags fail validation ({ and } are invalid for anyURI, -->
    <!-- ironically). -->
    <xs:simpleType name="t_btag_uri">
        <xs:restriction base="xs:string">
            <xs:pattern value="\w+:(/?/?)[^\s]+"/>
            <xs:pattern value=".*\{variable%[A-Za-z0-9_]+\}.*"/>
            <xs:pattern value=".*\{xpath%[&quot;'A-Za-z0-9_/\(\)\.\*@\-\[\]=]+\}.*"/>
        </xs:restriction>
    </xs:simpleType>
    <!-- END t_btag_uri -->
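    <!-- Illustration (editor's addition, not part of the schema): a plain URI such as
         "https://bdisk.square-r00t.net/" matches the first pattern, while btag forms like
         "{variable%mirror_url}" or "{xpath%//meta/dev/website}" match the other two.
         The variable and xpath names here are hypothetical. -->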

    <!-- t_filename: a POSIX fully-portable filename. -->
    <xs:simpleType name="t_filename">
        <xs:restriction base="xs:string">
            <xs:pattern value="([a-z0-9._-]+){1,255}"/>
            <xs:pattern value=".*\{variable%[A-Za-z0-9_]+\}.*"/>
            <xs:pattern value=".*\{xpath%[&quot;'A-Za-z0-9_/\(\)\.\*@\-\[\]=]+\}.*"/>
            <!-- We don't allow (string)(regex) or (regex)(string) or (string)(regex)(string) or multiple regexes -->
            <!-- because that's just... not feasible to manage from a parsing perspective. -->
            <xs:pattern value="\{regex%.+\}"/>
        </xs:restriction>
    </xs:simpleType>
    <!-- END t_filename -->

    <!-- t_gpg_keyid: a set of various patterns that match GPG key IDs. -->
    <xs:simpleType name="t_gpg_keyid">
        <xs:restriction base="xs:string">
            <xs:pattern value="(none|new)"/>
            <xs:pattern value="(auto|default)"/>
            <xs:pattern value="(0x)?[0-9A-Fa-f]{40}"/>
            <xs:pattern value="(0x)?[0-9A-Fa-f]{16}"/>
            <xs:pattern value="(0x)?[0-9A-Fa-f]{8}"/>
            <xs:pattern value="([0-9A-Fa-f ]{4}){5} ?([0-9A-Fa-f ]{4}){4}[0-9A-Fa-f]{4}"/>
        </xs:restriction>
    </xs:simpleType>
    <!-- END t_gpg_keyid -->
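    <!-- For reference (editor's addition, not schema content): valid values include the
         literals none/new/auto/default, or hex key IDs such as "0xDEADBEEF" (short, 8 digits),
         "0xDEADBEEFDEADBEEF" (long, 16 digits), or a full 40-hex-digit fingerprint, each
         with an optional "0x" prefix; the last pattern admits a spaced-out fingerprint form. -->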

    <!-- t_gpg_keyid_list: a type for a list of key IDs. -->
    <xs:simpleType name="t_gpg_keyid_list">
        <xs:list itemType="t_gpg_keyid"/>
    </xs:simpleType>
    <!-- END t_gpg_keyid_list -->

    <!-- t_net_loc: a remote host. Used for PKI Subject's commonName and host for rsync. -->
    <xs:simpleType name="t_net_loc">
        <xs:restriction base="xs:string">
            <xs:pattern
                    value="(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])"/>
        </xs:restriction>
    </xs:simpleType>
    <!-- END t_net_loc -->

    <!-- t_pass_hash_algo: used for t_password. -->
    <xs:simpleType name="t_pass_hash_algo">
        <xs:restriction base="xs:string">
            <xs:enumeration value="des"/>
            <xs:enumeration value="md5"/>
            <xs:enumeration value="sha256"/>
            <xs:enumeration value="sha512"/>
        </xs:restriction>
    </xs:simpleType>
    <!-- END t_pass_hash_algo -->

    <!-- t_pass_salt: used for t_password. -->
    <xs:simpleType name="t_pass_salt">
        <xs:restriction base="xs:string">
            <xs:pattern value="($[156]($rounds=[0-9]+)?$[a-zA-Z0-9./]{1,16}$?|auto|)"/>
            <xs:pattern value="\{variable%[A-Za-z0-9_]+\}"/>
            <xs:pattern value="\{xpath%[&quot;'A-Za-z0-9_\(\)\.\*\-/\[\]=]+\}"/>
        </xs:restriction>
    </xs:simpleType>
    <!-- END t_pass_salt -->

    <!-- t_password: used for rootpass and user/password elements. -->
    <xs:complexType name="t_password">
        <!-- The below will need some fleshing out and testing. It may not be possible strictly via XSD. -->
        <!-- TODO: restrict the value further with a union or multi-group regex that checks for a valid length? -->
        <!-- des: ????? -->
        <!-- md5: "[a-zA-Z0-9./]{22}" -->
        <!-- sha256: "[a-zA-Z0-9./]{43}" -->
        <!-- sha512: "[a-zA-Z0-9./]{86}" -->
        <xs:simpleContent>
            <xs:extension base="xs:string">
                <xs:attribute name="hash_algo" type="t_pass_hash_algo" use="optional"/>
                <xs:attribute name="hashed" type="xs:boolean" use="required"/>
                <xs:attribute name="salt" type="t_pass_salt" use="optional"/>
            </xs:extension>
        </xs:simpleContent>
    </xs:complexType>
    <!-- END t_password -->
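    <!-- Editor's illustration (an assumption about intended use, not enforced here): a value
         satisfying the sha512 note above can be produced with Python's crypt module, e.g.
         python3 -c "import crypt; print(crypt.crypt('SECRET', crypt.mksalt(crypt.METHOD_SHA512)))"
         and then supplied with hashed="true" hash_algo="sha512". -->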

    <!-- t_path: for specifying subdirectories (either local filesystem or remote paths). -->
    <xs:simpleType name="t_path">
        <xs:restriction base="xs:string">
            <!-- We include blank to operate on default actions (or default filepaths). -->
            <xs:pattern value=""/>
            <xs:pattern value="(.+)/([^/]+)"/>
            <xs:pattern value="((.+)/([^/]+))?\{variable%[A-Za-z0-9_]+\}((.+)/([^/]+))?"/>
            <xs:pattern value="((.+)/([^/]+))?\{xpath%[&quot;'A-Za-z0-9_\(\)\.\*\-/\[\]=]+\}((.+)/([^/]+))?"/>
        </xs:restriction>
    </xs:simpleType>
    <!-- END t_path -->

    <!-- t_pki_cert: used for pki/ca/cert and pki/client/cert. -->
    <xs:complexType name="t_pki_cert">
        <xs:simpleContent>
            <xs:extension base="t_path">
                <xs:attribute name="hash_algo" use="required">
                    <xs:simpleType>
                        <xs:restriction base="xs:string">
                            <xs:enumeration value="blake2b512"/>
                            <xs:enumeration value="blake2s256"/>
                            <xs:enumeration value="gost"/>
                            <xs:enumeration value="md4"/>
                            <xs:enumeration value="md5"/>
                            <xs:enumeration value="mdc2"/>
                            <xs:enumeration value="rmd160"/>
                            <xs:enumeration value="sha1"/>
                            <xs:enumeration value="sha224"/>
                            <xs:enumeration value="sha256"/>
                            <xs:enumeration value="sha384"/>
                            <xs:enumeration value="sha512"/>
                            <xs:enumeration value="none"/>
                        </xs:restriction>
                    </xs:simpleType>
                </xs:attribute>
            </xs:extension>
        </xs:simpleContent>
    </xs:complexType>
    <!-- END t_pki_cert -->

    <!-- t_pki_key: used for pki/ca/key and pki/client/key -->
    <xs:complexType name="t_pki_key">
        <xs:simpleContent>
            <xs:extension base="t_path">
                <xs:attribute name="cipher" use="required">
                    <xs:simpleType>
                        <xs:restriction base="xs:string">
                            <xs:enumeration value="aes128"/>
                            <xs:enumeration value="aes192"/>
                            <xs:enumeration value="bf"/>
                            <xs:enumeration value="blowfish"/>
                            <xs:enumeration value="camellia128"/>
                            <xs:enumeration value="camellia192"/>
                            <xs:enumeration value="camellia256"/>
                            <xs:enumeration value="des"/>
                            <xs:enumeration value="rc2"/>
                            <xs:enumeration value="seed"/>
                            <xs:enumeration value="none"/>
                        </xs:restriction>
                    </xs:simpleType>
                </xs:attribute>
                <xs:attribute name="passphrase" type="xs:string"/>
                <xs:attribute name="keysize" type="xs:positiveInteger"/>
            </xs:extension>
        </xs:simpleContent>
    </xs:complexType>
    <!-- END t_pki_key -->

    <!-- t_pki_subject: used for pki/ca/subject and pki/client/subject -->
    <xs:complexType name="t_pki_subject">
        <xs:all>
            <!-- .../SUBJECT/COMMONNAME -->
            <xs:element name="commonName" type="t_net_loc"/>
            <!-- END .../SUBJECT/COMMONNAME -->
            <!-- .../SUBJECT/COUNTRYNAME -->
            <xs:element name="countryName">
                <xs:simpleType>
                    <xs:restriction base="xs:string">
                        <!-- We can't validate an actual ISO-3166 ALPHA-2 code, but we can validate the format. -->
                        <!-- TODO: maybe cron the generation of an external namespace? -->
                        <xs:pattern value="[A-Z]{2}"/>
                        <xs:pattern value=".*\{variable%[A-Za-z0-9_]+\}.*"/>
                        <xs:pattern value=".*\{xpath%[&quot;'A-Za-z0-9_/\(\)\.\*@\-\[\]=]+\}.*"/>
                    </xs:restriction>
                </xs:simpleType>
            </xs:element>
            <!-- END .../SUBJECT/COUNTRYNAME -->
            <!-- .../SUBJECT/LOCALITYNAME -->
            <xs:element name="localityName" type="xs:string"/>
            <!-- END .../SUBJECT/LOCALITYNAME -->
            <!-- .../SUBJECT/STATEORPROVINCENAME -->
            <xs:element name="stateOrProvinceName" type="xs:string"/>
            <!-- END .../SUBJECT/STATEORPROVINCENAME -->
            <!-- .../SUBJECT/ORGANIZATION -->
            <xs:element name="organization" type="xs:string"/>
            <!-- END .../SUBJECT/ORGANIZATION -->
            <!-- .../SUBJECT/ORGANIZATIONALUNITNAME -->
            <xs:element name="organizationalUnitName" type="xs:string"/>
            <!-- END .../SUBJECT/ORGANIZATIONALUNITNAME -->
            <!-- .../SUBJECT/EMAILADDRESS -->
            <xs:element name="emailAddress" type="xs:string"/>
            <!-- END .../SUBJECT/EMAILADDRESS -->
        </xs:all>
    </xs:complexType>
    <!-- END t_pki_subject -->

    <!-- t_remote_file: an element that lets us define both a file pattern for remote content and a flags attribute. -->
    <xs:complexType name="t_remote_file">
        <xs:simpleContent>
            <xs:extension base="t_filename">
                <xs:attribute name="flags" type="t_remote_file_flags" use="optional"/>
            </xs:extension>
        </xs:simpleContent>
    </xs:complexType>
    <!-- END t_remote_file -->

    <!-- t_remote_file_flags: a type to match a list of known flags. -->
    <xs:simpleType name="t_remote_file_flags">
        <xs:list>
            <xs:simpleType>
                <xs:restriction base="xs:string">
                    <!-- Currently we only support two flags. -->
                    <xs:enumeration value="regex"/>
                    <xs:enumeration value="latest"/>
                </xs:restriction>
            </xs:simpleType>
        </xs:list>
    </xs:simpleType>
    <!-- END t_remote_file_flags -->

    <!-- t_username: enforce a POSIX-compliant username. Used for user/username elements. -->
    <xs:simpleType name="t_username">
        <xs:restriction base="xs:string">
            <xs:pattern value="[a-z_]([a-z0-9_-]{0,31}|[a-z0-9_-]{0,30}$)"/>
            <xs:pattern value="\{variable%[A-Za-z0-9_]+\}"/>
            <xs:pattern value="\{xpath%[&quot;'A-Za-z0-9_\(\)\.\*\-/\[\]=]+\}"/>
        </xs:restriction>
    </xs:simpleType>
    <!-- END t_username -->
    <!-- END CUSTOM TYPES -->

    <!-- ROOT ELEMENT ("BDISK") -->
    <xs:element name="bdisk">
        <xs:complexType>
            <!-- Should this be xs:sequence instead? -->
            <xs:sequence>
                <!-- BDISK/PROFILE -->
                <xs:element name="profile" maxOccurs="unbounded" minOccurs="1">
                    <xs:complexType>
                        <xs:all>
                            <!-- BDISK/PROFILE/META -->
                            <xs:element name="meta" maxOccurs="1" minOccurs="1">
                                <xs:complexType>
                                    <xs:all>
                                        <!-- BDISK/PROFILE/META/NAMES -->
                                        <xs:element name="names" maxOccurs="1" minOccurs="1">
                                            <xs:complexType>
                                                <xs:all>
                                                    <!-- BDISK/PROFILE/META/NAMES/NAME -->
                                                    <xs:element name="name" maxOccurs="1" minOccurs="1">
                                                        <xs:simpleType>
                                                            <xs:restriction base="xs:string">
                                                                <xs:pattern value="[A-Z0-9]{1,8}"/>
                                                                <xs:pattern value="\{variable%[A-Za-z0-9_]+\}"/>
                                                                <xs:pattern value="\{xpath%[A-Za-z0-9_\(\)\.\*\-/]+\}"/>
                                                            </xs:restriction>
                                                        </xs:simpleType>
                                                    </xs:element>
                                                    <!-- END BDISK/PROFILE/META/NAMES/NAME -->
                                                    <!-- BDISK/PROFILE/META/NAMES/UXNAME -->
                                                    <xs:element name="uxname" maxOccurs="1" minOccurs="1">
                                                        <xs:simpleType>
                                                            <xs:restriction base="xs:string">
                                                                <!-- refer to the 2009 POSIX spec, "3.282 Portable Filename Character Set" -->
                                                                <!-- http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_282 -->
                                                                <!-- (We use this string to name some files.) -->
                                                                <xs:pattern value="([A-Za-z0-9._-]+){1,255}"/>
                                                                <xs:pattern value="\{variable%[A-Za-z0-9_]+\}"/>
                                                                <xs:pattern value="\{xpath%[A-Za-z0-9_\(\)\.\*\-/]+\}"/>
                                                            </xs:restriction>
                                                        </xs:simpleType>
                                                    </xs:element>
                                                    <!-- END BDISK/PROFILE/META/NAMES/UXNAME -->
                                                    <!-- BDISK/PROFILE/META/NAMES/PNAME -->
                                                    <xs:element name="pname" maxOccurs="1" minOccurs="1">
                                                        <xs:simpleType>
                                                            <xs:restriction base="xs:string">
                                                                <!-- TODO: Can I use UTF-8 instead? -->
                                                                <!-- https://stackoverflow.com/a/9805789/733214 -->
                                                                <xs:pattern value="\p{IsBasicLatin}*"/>
                                                            </xs:restriction>
                                                        </xs:simpleType>
                                                    </xs:element>
                                                    <!-- END BDISK/PROFILE/META/NAMES/PNAME -->
                                                </xs:all>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/META/NAMES -->
                                        <!-- BDISK/PROFILE/META/DESC -->
                                        <xs:element name="desc" maxOccurs="1" minOccurs="1" type="xs:string"/>
                                        <!-- END BDISK/PROFILE/META/DESC -->
                                        <!-- BDISK/PROFILE/META/DEV -->
                                        <xs:element name="dev" maxOccurs="1" minOccurs="1">
                                            <xs:complexType>
                                                <xs:all>
                                                    <!-- BDISK/PROFILE/META/DEV/AUTHOR -->
                                                    <xs:element name="author" maxOccurs="1" minOccurs="1" type="xs:normalizedString"/>
                                                    <!-- END BDISK/PROFILE/META/DEV/AUTHOR -->
                                                    <!-- BDISK/PROFILE/META/DEV/EMAIL -->
                                                    <!-- The following does NOT WORK. Shame, really. -->
                                                    <!-- It seems to be an invalid pattern per my XSD validator (xmllint). -->
                                                    <!--<xs:pattern value="([!#-'*+/-9=?A-Z^-~-]+(\.[!#-'*+/-9=?A-Z^-~-]+)*|"([]!#-[^-~ \t]|(\\[\t -~]))+")@([!#-'*+/-9=?A-Z^-~-]+(\.[!#-'*+/-9=?A-Z^-~-]+)*|\[[\t -Z^-~]*])"/>-->
                                                    <xs:element name="email" maxOccurs="1" minOccurs="1" type="xs:normalizedString"/>
                                                    <!-- END BDISK/PROFILE/META/DEV/EMAIL -->
                                                    <!-- BDISK/PROFILE/META/DEV/WEBSITE -->
                                                    <xs:element name="website" maxOccurs="1" minOccurs="1" type="t_btag_uri"/>
                                                    <!-- END BDISK/PROFILE/META/DEV/WEBSITE -->
                                                </xs:all>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/META/DEV -->
                                        <!-- BDISK/PROFILE/META/URI -->
                                        <xs:element name="uri" maxOccurs="1" minOccurs="1" type="t_btag_uri"/>
                                        <!-- END BDISK/PROFILE/META/URI -->
                                        <!-- BDISK/PROFILE/META/VER -->
                                        <xs:element name="ver" maxOccurs="1" minOccurs="1">
                                            <xs:simpleType>
                                                <xs:restriction base="xs:normalizedString">
                                                    <!-- Like ../names/uxname, this is also used to name certain files, so: POSIX portable filename. -->
                                                    <xs:pattern value="([A-Za-z0-9._-]+){1,255}"/>
                                                    <xs:pattern value="\{variable%[A-Za-z0-9_]+\}"/>
                                                    <xs:pattern value="\{xpath%[A-Za-z0-9_\(\)\.\*\-/]+\}"/>
                                                </xs:restriction>
                                            </xs:simpleType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/META/VER -->
                                        <!-- BDISK/PROFILE/META/MAX_RECURSE -->
                                        <xs:element name="max_recurse" maxOccurs="1" minOccurs="1">
                                            <xs:simpleType>
                                                <xs:restriction base="xs:positiveInteger">
                                                    <xs:maxExclusive value="1000"/>
                                                </xs:restriction>
                                            </xs:simpleType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/META/MAX_RECURSE -->
                                        <!-- BDISK/PROFILE/META/REGEXES -->
                                        <xs:element name="regexes" maxOccurs="1" minOccurs="0">
                                            <xs:complexType>
                                                <xs:sequence>
                                                    <!-- BDISK/PROFILE/META/REGEXES/PATTERN -->
                                                    <xs:element name="pattern" maxOccurs="unbounded" minOccurs="1">
                                                        <xs:complexType>
                                                            <xs:simpleContent>
                                                                <xs:extension base="xs:string">
                                                                    <xs:attribute name="id" type="xs:string" use="required"/>
                                                                </xs:extension>
                                                            </xs:simpleContent>
                                                        </xs:complexType>
                                                    </xs:element>
                                                    <!-- END BDISK/PROFILE/META/REGEXES/PATTERN -->
                                                </xs:sequence>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/META/REGEXES -->
                                        <!-- BDISK/PROFILE/META/VARIABLES -->
                                        <xs:element name="variables" maxOccurs="1" minOccurs="0">
                                            <xs:complexType>
                                                <xs:sequence>
                                                    <!-- BDISK/PROFILE/META/VARIABLES/VARIABLE -->
                                                    <xs:element name="variable" maxOccurs="unbounded" minOccurs="1">
                                                        <xs:complexType>
                                                            <xs:simpleContent>
                                                                <xs:extension base="xs:string">
                                                                    <xs:attribute name="id" type="xs:string" use="required"/>
                                                                </xs:extension>
                                                            </xs:simpleContent>
                                                        </xs:complexType>
                                                    </xs:element>
                                                    <!-- END BDISK/PROFILE/META/VARIABLES/VARIABLE -->
                                                </xs:sequence>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/META/VARIABLES -->
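                                        <!-- Editor's illustration (hypothetical instance data, not schema):
                                             <variables><variable id="mirror_url">https://example.com/</variable></variables>
                                             defines a value that other fields can reference as {variable%mirror_url}. -->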
                                    </xs:all>
                                </xs:complexType>
                            </xs:element>
                            <!-- END BDISK/PROFILE/META -->
                            <!-- BDISK/PROFILE/ACCOUNTS -->
                            <xs:element name="accounts" maxOccurs="1" minOccurs="1">
                                <xs:complexType>
                                    <xs:sequence>
                                        <!-- BDISK/PROFILE/ACCOUNTS/ROOTPASS -->
                                        <xs:element name="rootpass" maxOccurs="1" minOccurs="1" type="t_password"/>
                                        <!-- END BDISK/PROFILE/ACCOUNTS/ROOTPASS -->
                                        <!-- BDISK/PROFILE/ACCOUNTS/USER -->
                                        <xs:element name="user" maxOccurs="unbounded" minOccurs="0">
                                            <xs:complexType>
                                                <xs:all>
                                                    <!-- BDISK/PROFILE/ACCOUNTS/USER/USERNAME -->
                                                    <xs:element name="username" type="t_username" maxOccurs="1" minOccurs="1"/>
                                                    <!-- END BDISK/PROFILE/ACCOUNTS/USER/USERNAME -->
                                                    <!-- BDISK/PROFILE/ACCOUNTS/USER/COMMENT -->
                                                    <!-- https://en.wikipedia.org/wiki/Gecos_field -->
                                                    <!-- Through experimentation, this *seems* to cap at 990 chars. -->
                                                    <xs:element name="comment" maxOccurs="1" minOccurs="0">
                                                        <xs:simpleType>
                                                            <xs:restriction base="xs:normalizedString">
                                                                <xs:maxLength value="990"/>
                                                            </xs:restriction>
                                                        </xs:simpleType>
                                                    </xs:element>
                                                    <!-- END BDISK/PROFILE/ACCOUNTS/USER/COMMENT -->
                                                    <!-- BDISK/PROFILE/ACCOUNTS/USER/PASSWORD -->
                                                    <xs:element name="password" type="t_password" maxOccurs="1" minOccurs="1"/>
                                                    <!-- END BDISK/PROFILE/ACCOUNTS/USER/PASSWORD -->
                                                </xs:all>
                                                <xs:attribute name="sudo" type="xs:boolean" use="optional"/>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/ACCOUNTS/USER -->
                                    </xs:sequence>
                                </xs:complexType>
                            </xs:element>
                            <!-- END BDISK/PROFILE/ACCOUNTS -->
                            <!-- BDISK/PROFILE/SOURCES -->
                            <xs:element name="sources" maxOccurs="1" minOccurs="1">
                                <xs:complexType>
                                    <xs:sequence>
                                        <!-- BDisk currently only supports two different architectures (x86/i686 and x86_64, respectively). -->
                                        <!-- TODO: future improvements may let us include e.g. two different x86_64 environments (e.g. CentOS and Debian on the same media), but this is still in early development. -->
                                        <!-- BDISK/PROFILE/SOURCES/SOURCE -->
                                        <xs:element name="source" minOccurs="1" maxOccurs="2">
                                            <xs:complexType>
                                                <xs:all>
                                                    <!-- We cheat here. TECHNICALLY it should ONLY be scheme://location (no /path...), but there isn't a data type for that. -->
                                                    <!-- Currently we enforce only one item. Future BDisk versions may be able to make use of multiple <mirror>s and select the best one based on speed. -->
                                                    <!-- BDISK/PROFILE/SOURCES/SOURCE/MIRROR -->
                                                    <xs:element name="mirror" type="t_btag_uri" maxOccurs="1" minOccurs="1"/>
                                                    <!-- END BDISK/PROFILE/SOURCES/SOURCE/MIRROR -->
                                                    <!-- BDISK/PROFILE/SOURCES/SOURCE/ROOTPATH -->
                                                    <xs:element name="rootpath" maxOccurs="1" minOccurs="1" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/SOURCES/SOURCE/ROOTPATH -->
                                                    <!-- BDISK/PROFILE/SOURCES/SOURCE/TARBALL -->
                                                    <xs:element name="tarball" maxOccurs="1" minOccurs="1" type="t_remote_file"/>
                                                    <!-- END BDISK/PROFILE/SOURCES/SOURCE/TARBALL -->
                                                    <!-- BDISK/PROFILE/SOURCES/SOURCE/CHECKSUM -->
                                                    <xs:element name="checksum" maxOccurs="1" minOccurs="0">
                                                        <xs:complexType>
                                                            <xs:simpleContent>
                                                                <xs:extension base="t_remote_file">
                                                                    <!-- There is NO way we can validate this, because it will vary based on the algorithms supported by the build host. -->
                                                                    <xs:attribute name="hash_algo" type="xs:string" use="required"/>
                                                                    <xs:attribute name="explicit" type="xs:boolean" use="required"/>
                                                                </xs:extension>
                                                            </xs:simpleContent>
                                                        </xs:complexType>
                                                    </xs:element>
                                                    <!-- END BDISK/PROFILE/SOURCES/SOURCE/CHECKSUM -->
                                                    <!-- BDISK/PROFILE/SOURCES/SOURCE/SIG -->
                                                    <xs:element name="sig" maxOccurs="1" minOccurs="0">
                                                        <xs:complexType>
                                                            <xs:simpleContent>
                                                                <xs:extension base="t_remote_file">
                                                                    <!-- Required; otherwise there's no point using it. -->
                                                                    <xs:attribute name="keys" type="t_gpg_keyid_list" use="required"/>
                                                                    <xs:attribute name="keyserver" type="t_btag_uri" use="optional"/>
                                                                </xs:extension>
                                                            </xs:simpleContent>
                                                        </xs:complexType>
                                                    </xs:element>
                                                    <!-- END BDISK/PROFILE/SOURCES/SOURCE/SIG -->
                                                </xs:all>
                                                <xs:attribute name="arch" use="required">
                                                    <xs:simpleType>
                                                        <xs:restriction base="xs:string">
                                                            <xs:pattern value="(i686|x86(_64)?|32|64)"/>
                                                        </xs:restriction>
                                                    </xs:simpleType>
                                                </xs:attribute>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/SOURCES/SOURCE -->
                                    </xs:sequence>
                                </xs:complexType>
                            </xs:element>
                            <!-- END BDISK/PROFILE/SOURCES -->
                            <!-- BDISK/PROFILE/PACKAGES -->
                            <xs:element name="packages" maxOccurs="1" minOccurs="0">
                                <xs:complexType>
                                    <xs:sequence>
                                        <!-- BDISK/PROFILE/PACKAGES/PACKAGE -->
                                        <xs:element name="package" maxOccurs="unbounded" minOccurs="1">
                                            <xs:complexType>
                                                <xs:simpleContent>
                                                    <xs:extension base="xs:string">
                                                        <xs:attribute name="version" type="xs:string" use="optional"/>
                                                        <xs:attribute name="repo" type="xs:string" use="optional"/>
                                                        <!-- Default is "both" -->
                                                        <xs:attribute name="arch" use="optional">
                                                            <xs:simpleType>
                                                                <xs:restriction base="xs:string">
                                                                    <xs:pattern value="(i686|x86(_64)?|32|64|both)"/>
                                                                </xs:restriction>
                                                            </xs:simpleType>
                                                        </xs:attribute>
                                                    </xs:extension>
                                                </xs:simpleContent>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/PACKAGES/PACKAGE -->
                                    </xs:sequence>
                                </xs:complexType>
                            </xs:element>
                            <!-- END BDISK/PROFILE/PACKAGES -->
                            <!-- BDISK/PROFILE/SERVICES -->
                            <xs:element name="services" maxOccurs="1" minOccurs="0">
                                <xs:complexType>
                                    <xs:sequence>
                                        <!-- BDISK/PROFILE/SERVICES/SERVICE -->
                                        <xs:element name="service" maxOccurs="unbounded" minOccurs="1">
                                            <xs:complexType>
                                                <xs:simpleContent>
                                                    <xs:extension base="xs:string">
                                                        <xs:attribute name="enabled" type="xs:boolean" use="required"/>
                                                        <xs:attribute name="blacklisted" type="xs:boolean" use="optional"/>
                                                    </xs:extension>
                                                </xs:simpleContent>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/SERVICES/SERVICE -->
                                    </xs:sequence>
                                </xs:complexType>
                            </xs:element>
                            <!-- END BDISK/PROFILE/SERVICES -->
                            <!-- BDISK/PROFILE/BUILD -->
                            <xs:element name="build" maxOccurs="1" minOccurs="1">
                                <xs:complexType>
                                    <xs:all>
                                        <!-- BDISK/PROFILE/BUILD/PATHS -->
                                        <xs:element name="paths">
                                            <xs:complexType>
                                                <xs:all>
                                                    <!-- BDISK/PROFILE/BUILD/PATHS/BASE -->
                                                    <xs:element name="base" maxOccurs="1" minOccurs="1" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/BUILD/PATHS/BASE -->
                                                    <!-- BDISK/PROFILE/BUILD/PATHS/CACHE -->
                                                    <xs:element name="cache" maxOccurs="1" minOccurs="1" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/BUILD/PATHS/CACHE -->
                                                    <!-- BDISK/PROFILE/BUILD/PATHS/CHROOT -->
                                                    <xs:element name="chroot" maxOccurs="1" minOccurs="1" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/BUILD/PATHS/CHROOT -->
                                                    <!-- BDISK/PROFILE/BUILD/PATHS/OVERLAY -->
                                                    <xs:element name="overlay" maxOccurs="1" minOccurs="1" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/BUILD/PATHS/OVERLAY -->
                                                    <!-- BDISK/PROFILE/BUILD/PATHS/TEMPLATES -->
                                                    <xs:element name="templates" maxOccurs="1" minOccurs="1" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/BUILD/PATHS/TEMPLATES -->
                                                    <!-- BDISK/PROFILE/BUILD/PATHS/MOUNT -->
                                                    <xs:element name="mount" maxOccurs="1" minOccurs="1" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/BUILD/PATHS/MOUNT -->
                                                    <!-- BDISK/PROFILE/BUILD/PATHS/DISTROS -->
                                                    <xs:element name="distros" maxOccurs="1" minOccurs="1" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/BUILD/PATHS/DISTROS -->
                                                    <!-- BDISK/PROFILE/BUILD/PATHS/DEST -->
                                                    <xs:element name="dest" maxOccurs="1" minOccurs="1" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/BUILD/PATHS/DEST -->
                                                    <!-- BDISK/PROFILE/BUILD/PATHS/ISO -->
                                                    <xs:element name="iso" maxOccurs="1" minOccurs="1" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/BUILD/PATHS/ISO -->
                                                    <!-- BDISK/PROFILE/BUILD/PATHS/HTTP -->
                                                    <xs:element name="http" maxOccurs="1" minOccurs="1" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/BUILD/PATHS/HTTP -->
                                                    <!-- BDISK/PROFILE/BUILD/PATHS/TFTP -->
                                                    <xs:element name="tftp" maxOccurs="1" minOccurs="1" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/BUILD/PATHS/TFTP -->
                                                    <!-- BDISK/PROFILE/BUILD/PATHS/PKI -->
                                                    <xs:element name="pki" maxOccurs="1" minOccurs="1" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/BUILD/PATHS/PKI -->
                                                </xs:all>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/BUILD/PATHS -->
                                        <!-- BDISK/PROFILE/BUILD/BASEDISTRO -->
                                        <xs:element name="basedistro"/>
                                        <!-- END BDISK/PROFILE/BUILD/BASEDISTRO -->
                                    </xs:all>
                                    <xs:attribute name="its_full_of_stars" type="xs:boolean"/>
                                </xs:complexType>
                            </xs:element>
                            <!-- END BDISK/PROFILE/BUILD -->
                            <!-- BDISK/PROFILE/ISO -->
                            <xs:element name="iso" maxOccurs="1" minOccurs="1">
                                <xs:complexType>
                                    <xs:attribute name="sign" type="xs:boolean"/>
                                    <xs:attribute name="multi_arch">
                                        <xs:simpleType>
                                            <xs:restriction base="xs:string">
                                                <xs:enumeration value="yes"/>
                                                <xs:enumeration value="no"/>
                                                <xs:enumeration value="true"/>
                                                <xs:enumeration value="false"/>
                                                <xs:enumeration value="x86_64"/>
                                                <xs:enumeration value="x86"/>
                                                <xs:enumeration value="64"/>
                                                <xs:enumeration value="32"/>
                                                <xs:enumeration value="i686"/>
                                            </xs:restriction>
                                        </xs:simpleType>
                                    </xs:attribute>
                                </xs:complexType>
                            </xs:element>
                            <!-- END BDISK/PROFILE/ISO -->
                            <!-- BDISK/PROFILE/IPXE -->
                            <xs:element name="ipxe" maxOccurs="1" minOccurs="1">
                                <xs:complexType>
                                    <xs:all>
                                        <!-- BDISK/PROFILE/IPXE/URI -->
                                        <xs:element name="uri" type="t_btag_uri" maxOccurs="1" minOccurs="1"/>
                                        <!-- END BDISK/PROFILE/IPXE/URI -->
                                    </xs:all>
                                    <xs:attribute name="sign" type="xs:boolean"/>
                                    <xs:attribute name="iso" type="xs:boolean"/>
                                </xs:complexType>
                            </xs:element>
                            <!-- END BDISK/PROFILE/IPXE -->
                            <!-- BDISK/PROFILE/GPG -->
                            <xs:element name="gpg" maxOccurs="1" minOccurs="1">
                                <xs:complexType>
                                    <xs:sequence>
                                        <!-- BDISK/PROFILE/GPG/KEY -->
                                        <xs:element name="key" minOccurs="0" maxOccurs="unbounded">
                                            <xs:complexType>
                                                <xs:all>
                                                    <!-- BDISK/PROFILE/GPG/KEY/NAME -->
                                                    <xs:element name="name" type="xs:normalizedString" maxOccurs="1" minOccurs="1"/>
                                                    <!-- END BDISK/PROFILE/GPG/KEY/NAME -->
                                                    <!-- BDISK/PROFILE/GPG/KEY/EMAIL -->
                                                    <xs:element name="email" type="xs:normalizedString" maxOccurs="1" minOccurs="1"/>
                                                    <!-- END BDISK/PROFILE/GPG/KEY/EMAIL -->
                                                    <!-- BDISK/PROFILE/GPG/KEY/COMMENT -->
                                                    <xs:element name="comment" type="xs:string" maxOccurs="1" minOccurs="0"/>
                                                    <!-- END BDISK/PROFILE/GPG/KEY/COMMENT -->
                                                    <!-- BDISK/PROFILE/GPG/KEY/SUBKEY -->
                                                    <xs:element name="subkey" maxOccurs="1" minOccurs="0">
                                                        <xs:complexType>
                                                            <!-- See below for notes on attributes. -->
                                                            <!-- TODO: convert into shared type for parent as well? -->
                                                            <xs:attribute name="algo" use="optional">
                                                                <xs:simpleType>
                                                                    <xs:restriction base="xs:string">
                                                                        <xs:enumeration value="rsa"/>
                                                                        <xs:enumeration value="dsa"/>
                                                                        <xs:enumeration value="ed"/>
                                                                        <xs:enumeration value="nist"/>
                                                                        <xs:enumeration value="brainpool.1"/>
                                                                        <xs:enumeration value="sec.k1"/>
                                                                    </xs:restriction>
                                                                </xs:simpleType>
                                                            </xs:attribute>
                                                            <xs:attribute name="keysize" type="xs:positiveInteger" use="optional"/>
                                                            <xs:attribute name="expire" use="optional">
                                                                <xs:simpleType>
                                                                    <xs:restriction base="xs:integer">
                                                                        <xs:pattern value="(0|[0-9]{10})"/>
                                                                    </xs:restriction>
                                                                </xs:simpleType>
                                                            </xs:attribute>
                                                        </xs:complexType>
                                                    </xs:element>
                                                    <!-- END BDISK/PROFILE/GPG/KEY/SUBKEY -->
                                                </xs:all>
                                                <xs:attribute name="algo" use="optional">
                                                    <xs:simpleType>
                                                        <xs:restriction base="xs:string">
                                                            <!-- rsa, dsa, and elgamal are "normal". Newer GnuPG supports ECC (yay!), so we have support for those in the XSD (you can get a list with gpg -with-colons -list-config curve | cut -f3 -d":" | tr ';' '\n'). -->
                                                            <!-- We test in-code if the host supports it. -->
                                                            <xs:enumeration value="rsa"/>
                                                            <xs:enumeration value="dsa"/>
                                                            <!-- The following only support encryption. The entire reason we'd be generating a key is to sign files, so we disable them. -->
                                                            <!-- <xs:enumeration value="elg"/> -->
                                                            <!-- <xs:enumeration value="cv"/> -->
                                                            <xs:enumeration value="ed"/>
                                                            <xs:enumeration value="nist"/>
                                                            <xs:enumeration value="brainpool.1"/>
                                                            <xs:enumeration value="sec.k1"/>
                                                        </xs:restriction>
                                                    </xs:simpleType>
                                                </xs:attribute>
                                                <!-- We COULD constrain this further, but it's conditional upon the algo type. So we'll do that in BDisk itself. -->
                                                <!-- But it may be possible? https://stackoverflow.com/a/39045446/733214 -->
                                                <xs:attribute name="keysize" type="xs:positiveInteger" use="optional"/>
                                                <!-- XSD doesn't have a datatype for Epoch vs. 0 (for no expire). -->
                                                <xs:attribute name="expire" use="optional">
                                                    <xs:simpleType>
                                                        <!-- This is xs:integer instead of xs:positiveInteger because 0 would otherwise fail validation. -->
                                                        <xs:restriction base="xs:integer">
                                                            <xs:pattern value="(0|[0-9]{10})"/>
                                                        </xs:restriction>
                                                    </xs:simpleType>
                                                </xs:attribute>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/GPG/KEY -->
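                                        <!-- Editor's note (illustration only): per the expire pattern above, the
                                             attribute accepts 0 (no expiration) or a 10-digit UNIX epoch such as
                                             1735689600; values of any other length fail validation. -->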
                                    </xs:sequence>
                                    <xs:attribute name="keyid" type="t_gpg_keyid" use="required"/>
                                    <xs:attribute name="publish" type="xs:boolean" use="optional"/>
                                    <xs:attribute name="prompt_passphrase" type="xs:boolean" use="required"/>
                                    <xs:attribute name="passphrase" use="optional">
                                        <xs:simpleType>
                                            <xs:restriction base="xs:string">
                                                <xs:pattern
                                                        value="[!&quot;#$%&amp;\\'\(\)\*\+,\-\./0123456789:;&lt;=&gt;\?@ABCDEFGHIJKLMNOPQRSTUVWXYZ\[\]\^_`abcdefghijklmnopqrstuvwxyz\{\|\}~ ]+"/>
                                            </xs:restriction>
                                        </xs:simpleType>
                                    </xs:attribute>
                                    <xs:attribute name="gnupghome" use="optional">
                                        <xs:simpleType>
                                            <xs:restriction base="xs:string">
                                                <xs:pattern value="(.+)/([^/]+)"/>
                                                <xs:pattern value="((.+)/([^/]+))?\{variable%[A-Za-z0-9_]+\}((.+)/([^/]+))?"/>
                                                <xs:pattern value="((.+)/([^/]+))?\{xpath%[A-Za-z0-9_\(\)\.\*\-/]+\}((.+)/([^/]+))?"/>
                                                <xs:pattern value="(none|)"/>
                                                <xs:pattern value="(auto|default)"/>
                                            </xs:restriction>
                                        </xs:simpleType>
                                    </xs:attribute>
                                </xs:complexType>
                            </xs:element>
                            <!-- END BDISK/PROFILE/GPG -->
                            <!-- BDISK/PROFILE/PKI -->
                            <xs:element name="pki" maxOccurs="1" minOccurs="0">
                                <xs:complexType>
                                    <xs:sequence>
                                        <!-- BDISK/PROFILE/PKI/CA -->
                                        <xs:element name="ca" maxOccurs="1" minOccurs="1">
                                            <xs:complexType>
                                                <xs:all>
                                                    <!-- BDISK/PROFILE/PKI/CA/CERT -->
                                                    <xs:element name="cert" maxOccurs="1" minOccurs="1" type="t_pki_cert"/>
                                                    <!-- END BDISK/PROFILE/PKI/CA/CERT -->
                                                    <!-- BDISK/PROFILE/PKI/CA/CSR -->
                                                    <xs:element name="csr" maxOccurs="1" minOccurs="0" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/PKI/CA/CSR -->
                                                    <!-- BDISK/PROFILE/PKI/CA/INDEX -->
                                                    <xs:element name="index" maxOccurs="1" minOccurs="0" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/PKI/CA/INDEX -->
                                                    <!-- BDISK/PROFILE/PKI/CA/SERIAL -->
                                                    <xs:element name="serial" maxOccurs="1" minOccurs="0" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/PKI/CA/SERIAL -->
                                                    <!-- BDISK/PROFILE/PKI/CA/KEY -->
                                                    <xs:element name="key" minOccurs="1" maxOccurs="1" type="t_pki_key"/>
                                                    <!-- END BDISK/PROFILE/PKI/CA/KEY -->
                                                    <!-- BDISK/PROFILE/PKI/CA/SUBJECT -->
                                                    <xs:element name="subject" maxOccurs="1" minOccurs="0" type="t_pki_subject"/>
                                                    <!-- END BDISK/PROFILE/PKI/CA/SUBJECT -->
                                                </xs:all>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/PKI/CA -->
                                        <!-- BDISK/PROFILE/PKI/CLIENT -->
                                        <xs:element name="client" maxOccurs="1" minOccurs="1">
                                            <xs:complexType>
                                                <xs:all>
                                                    <!-- BDISK/PROFILE/PKI/CLIENT/CERT -->
                                                    <xs:element name="cert" maxOccurs="1" minOccurs="1" type="t_pki_cert"/>
                                                    <!-- END BDISK/PROFILE/PKI/CLIENT/CERT -->
                                                    <!-- BDISK/PROFILE/PKI/CLIENT/CSR -->
                                                    <xs:element name="csr" maxOccurs="1" minOccurs="0" type="t_path"/>
                                                    <!-- END BDISK/PROFILE/PKI/CLIENT/CSR -->
                                                    <!-- BDISK/PROFILE/PKI/CLIENT/KEY -->
                                                    <xs:element name="key" minOccurs="1" maxOccurs="1" type="t_pki_key"/>
                                                    <!-- END BDISK/PROFILE/PKI/CLIENT/KEY -->
                                                    <!-- BDISK/PROFILE/PKI/CLIENT/SUBJECT -->
                                                    <xs:element name="subject" maxOccurs="1" minOccurs="0" type="t_pki_subject"/>
                                                    <!-- END BDISK/PROFILE/PKI/CLIENT/SUBJECT -->
                                                </xs:all>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/PKI/CLIENT -->
                                    </xs:sequence>
                                    <xs:attribute name="overwrite" type="xs:boolean" use="required"/>
                                </xs:complexType>
                            </xs:element>
                            <!-- END BDISK/PROFILE/PKI -->
                            <!-- BDISK/PROFILE/SYNC -->
                            <xs:element name="sync" maxOccurs="1" minOccurs="1">
                                <xs:complexType>
                                    <xs:all>
                                        <!-- BDISK/PROFILE/SYNC/IPXE -->
                                        <xs:element name="ipxe" maxOccurs="1" minOccurs="0">
                                            <xs:complexType>
                                                <xs:simpleContent>
                                                    <xs:extension base="t_path">
                                                        <xs:attribute name="enabled" type="xs:boolean" use="optional"/>
                                                    </xs:extension>
                                                </xs:simpleContent>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/SYNC/IPXE -->
                                        <!-- BDISK/PROFILE/SYNC/TFTP -->
                                        <xs:element name="tftp" maxOccurs="1" minOccurs="0">
                                            <xs:complexType>
                                                <xs:simpleContent>
                                                    <xs:extension base="t_path">
                                                        <xs:attribute name="enabled" type="xs:boolean" use="optional"/>
                                                    </xs:extension>
                                                </xs:simpleContent>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/SYNC/TFTP -->
                                        <!-- BDISK/PROFILE/SYNC/ISO -->
                                        <xs:element name="iso" maxOccurs="1" minOccurs="0">
                                            <xs:complexType>
                                                <xs:simpleContent>
                                                    <xs:extension base="t_path">
                                                        <xs:attribute name="enabled" type="xs:boolean" use="optional"/>
                                                    </xs:extension>
                                                </xs:simpleContent>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/SYNC/ISO -->
                                        <!-- BDISK/PROFILE/SYNC/GPG -->
                                        <xs:element name="gpg" maxOccurs="1" minOccurs="0">
                                            <xs:complexType>
                                                <xs:simpleContent>
                                                    <xs:extension base="t_path">
                                                        <xs:attribute name="enabled" type="xs:boolean" use="optional"/>
                                                        <xs:attribute name="format" use="required">
                                                            <xs:simpleType>
                                                                <xs:restriction base="xs:string">
                                                                    <xs:enumeration value="asc"/>
                                                                    <xs:enumeration value="bin"/>
                                                                </xs:restriction>
                                                            </xs:simpleType>
                                                        </xs:attribute>
                                                    </xs:extension>
                                                </xs:simpleContent>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/SYNC/GPG -->
                                        <!-- BDISK/PROFILE/SYNC/RSYNC -->
                                        <xs:element name="rsync" maxOccurs="1" minOccurs="1">
                                            <xs:complexType>
                                                <xs:sequence>
                                                    <!-- BDISK/PROFILE/SYNC/RSYNC/USER -->
                                                    <xs:element name="user" type="t_username" maxOccurs="1" minOccurs="1"/>
                                                    <!-- END BDISK/PROFILE/SYNC/RSYNC/USER -->
                                                    <!-- BDISK/PROFILE/SYNC/RSYNC/HOST -->
                                                    <xs:element name="host" type="t_net_loc" maxOccurs="1" minOccurs="1"/>
                                                    <!-- END BDISK/PROFILE/SYNC/RSYNC/HOST -->
                                                    <!-- BDISK/PROFILE/SYNC/RSYNC/PORT -->
                                                    <xs:element name="port" maxOccurs="1" minOccurs="0">
                                                        <xs:simpleType>
                                                            <xs:restriction base="xs:positiveInteger">
                                                                <xs:minInclusive value="1"/>
                                                                <xs:maxInclusive value="65535"/>
                                                            </xs:restriction>
                                                        </xs:simpleType>
                                                    </xs:element>
                                                    <!-- END BDISK/PROFILE/SYNC/RSYNC/PORT -->
                                                    <xs:choice>
                                                        <!-- BDISK/PROFILE/SYNC/RSYNC/PUBKEY -->
                                                        <xs:element name="pubkey" type="t_path" maxOccurs="1" minOccurs="1"/>
                                                        <!-- END BDISK/PROFILE/SYNC/RSYNC/PUBKEY -->
                                                        <!-- BDISK/PROFILE/SYNC/RSYNC/PASSWORD -->
                                                        <xs:element name="password" maxOccurs="1" minOccurs="1"/>
                                                        <!-- END BDISK/PROFILE/SYNC/RSYNC/PASSWORD -->
                                                    </xs:choice>
                                                </xs:sequence>
                                                <xs:attribute name="enabled" type="xs:boolean" use="required"/>
                                            </xs:complexType>
                                        </xs:element>
                                        <!-- END BDISK/PROFILE/SYNC/RSYNC -->
                                    </xs:all>
                                </xs:complexType>
                            </xs:element>
                            <!-- END BDISK/PROFILE/SYNC -->
                        </xs:all>
                        <xs:attribute name="id" type="xs:positiveInteger" use="optional"/>
                        <xs:attribute name="name" type="xs:string" use="optional"/>
                        <xs:attribute name="uuid" use="optional">
                            <xs:simpleType>
                                <xs:restriction base="xs:string">
                                    <xs:pattern
                                            value="[0-9a-f]{8}\-[0-9a-f]{4}\-4[0-9a-f]{3}\-[89ab][0-9a-f]{3}\-[0-9a-f]{12}"/>
                                </xs:restriction>
                            </xs:simpleType>
                        </xs:attribute>
                    </xs:complexType>
                </xs:element>
                <!-- END BDISK/PROFILE -->
            </xs:sequence>
        </xs:complexType>
    </xs:element>
    <!-- END BDISK -->
</xs:schema>
187
bdisk/bsync.py
@ -1,187 +0,0 @@
import shutil
import os
import pwd
import grp
import datetime
import git
import subprocess


def http(conf):
    http = conf['http']
    build = conf['build']
    prepdir = build['prepdir']
    arch = build['arch']
    bdisk = conf['bdisk']
    if conf['sync']['http']:
        uid = pwd.getpwnam(http['user'])[2]
        gid = grp.getgrnam(http['group'])[2]
        httpdir = http['path']
        archboot = build['archboot']
        # remove the destination if it exists
        if os.path.isdir(httpdir):
            print('{0}: [HTTP] Removing {1}...'.format(
                    datetime.datetime.now(),
                    httpdir))
            shutil.rmtree(httpdir)
        # just to make it again. we do this to avoid file existing conflicts.
        os.makedirs(httpdir)
        # here we build a dict of files to copy and their destination paths.
        httpfiles = {}
        print('{0}: [HTTP] (Boot files) => {1}...'.format(
                datetime.datetime.now(),
                httpdir))
        for a in arch:
            for i in ('md5', 'sfs', 'sha256', 'sha512'):
                httpfiles['{0}/{1}/airootfs.{2}'.format(bdisk['name'], a, i)] = '{0}/{1}/airootfs.{2}'.format(bdisk['name'], a, i)
        httpfiles['VERSION_INFO.txt'] = 'VERSION_INFO.txt'
        if 'x86_64' in arch:
            httpfiles['boot/{0}.64.kern'.format(bdisk['uxname'])] = '{0}.64.kern'.format(bdisk['uxname'])
            httpfiles['boot/{0}.64.img'.format(bdisk['uxname'])] = '{0}.64.img'.format(bdisk['uxname'])
        if 'i686' in arch:
            httpfiles['boot/{0}.32.kern'.format(bdisk['uxname'])] = '{0}.32.kern'.format(bdisk['uxname'])
            httpfiles['boot/{0}.32.img'.format(bdisk['uxname'])] = '{0}.32.img'.format(bdisk['uxname'])
        httpfiles['{0}.png'.format(bdisk['uxname'])] = '{0}.png'.format(bdisk['uxname'])
        # and now the magic.
        for k in httpfiles.keys():
            destpath = httpfiles[k]
            fulldest = '{0}/{1}'.format(httpdir, destpath)
            parentdir = os.path.split(fulldest)[0]
            os.makedirs(parentdir, exist_ok = True)
            if os.path.lexists('{0}/{1}'.format(prepdir, k)):
                shutil.copy2('{0}/{1}'.format(prepdir, k), '{0}/{1}'.format(httpdir, httpfiles[k]))
        for root, dirs, files in os.walk(httpdir):
            for d in dirs:
                os.chown(os.path.join(root, d), uid, gid)
            for f in files:
                os.chown(os.path.join(root, f), uid, gid)

def tftp(conf):
    # TODO: pxelinux cfg
    tftp = conf['tftp']
    build = conf['build']
    prepdir = build['prepdir']
    arch = build['arch']
    bdisk = conf['bdisk']
    if conf['sync']['tftp']:
        uid = pwd.getpwnam(tftp['user'])[2]
        gid = grp.getgrnam(tftp['group'])[2]
        tftpdir = tftp['path']
        # remove the destination if it exists
        if os.path.isdir(tftpdir):
            print('{0}: [TFTP] Removing {1}...'.format(
                    datetime.datetime.now(),
                    tftpdir))
            shutil.rmtree(tftpdir)
        # and we make it again
        os.makedirs(tftpdir)
        # and make a dict of the files etc.
        tftpfiles = {}
        print('{0}: [TFTP] (Boot files) => {1}...'.format(
                datetime.datetime.now(),
                tftpdir))
        for a in arch:
            for i in ('md5', 'sfs', 'sha256', 'sha512'):
                tftpfiles['{0}/{1}/airootfs.{2}'.format(bdisk['name'], a, i)] = '{0}/{1}/airootfs.{2}'.format(bdisk['name'], a, i)
        tftpfiles['VERSION_INFO.txt'] = 'VERSION_INFO.txt'
        if 'x86_64' in arch:
            tftpfiles['boot/{0}.64.kern'.format(bdisk['uxname'])] = '{0}.kern'.format(bdisk['uxname'])
            tftpfiles['boot/{0}.64.img'.format(bdisk['uxname'])] = '{0}.img'.format(bdisk['uxname'])
        if 'i686' in arch:
            tftpfiles['boot/{0}.32.kern'.format(bdisk['uxname'])] = '{0}.32.kern'.format(bdisk['uxname'])
            tftpfiles['boot/{0}.32.img'.format(bdisk['uxname'])] = '{0}.32.img'.format(bdisk['uxname'])
        tftpfiles['{0}.png'.format(bdisk['uxname'])] = '{0}.png'.format(bdisk['uxname'])
        # and now the magic.
        for k in tftpfiles.keys():
            destpath = tftpfiles[k]
            fulldest = '{0}/{1}'.format(tftpdir, destpath)
            parentdir = os.path.split(fulldest)[0]
            os.makedirs(parentdir, exist_ok = True)
            shutil.copy2('{0}/{1}'.format(prepdir, k), '{0}/{1}'.format(tftpdir, tftpfiles[k]))
        for root, dirs, files in os.walk(tftpdir):
            for d in dirs:
                os.chown(os.path.join(root, d), uid, gid)
            for f in files:
                os.chown(os.path.join(root, f), uid, gid)

def git(conf):
    build = conf['build']
    git_name = conf['bdisk']['dev']
    git_email = conf['bdisk']['email']
    if conf['sync']['git']:
        print('{0}: [GIT] Creating commit...'.format(datetime.datetime.now()))
        repo = git.Repo(build['basedir'])
        repo.git.add('--all')
        repo.index.commit("automated commit from BDisk (git:sync)")
        print('{0}: [GIT] Pushing to remote...'.format(datetime.datetime.now()))
        repo.remotes.origin.push()


def rsync(conf):
    # TODO: just copy tftpbooting pxelinux.cfg (to be generated) if tftp,
    # and do nothing if http- copying over three copies of the squashed filesystems
    # is a waste of time, bandwidth, and disk space on target.
    build = conf['build']
    prepdir = build['prepdir']
    isodir = build['isodir']
    arch = build['arch']
    rsync = conf['rsync']
    sync = conf['sync']
    server = rsync['host']
    path = rsync['path']
    user = rsync['user']
    locpath = False
    if sync['rsync']:
        # TODO: some sort of debugging/logging
        cmd = ['/usr/bin/rsync',
               '-a',
               '-q',
               '-z',
               locpath,
               '{0}@{1}:{2}/.'.format(user, server, path)]
        #if sync['http']: # TODO: rsync:http to enable this
        #    cmd[4] = conf['http']['path']
        #    print('{0}: Syncing {1} to {2}. Please wait...'.format(
        #            datetime.datetime.now(),
        #            cmd[4],
        #            server))
        #    subprocess.call(cmd)
        #if sync['tftp']:
        #    cmd[4] = conf['tftp']['path']
        #    print('{0}: Syncing {1} to {2}. Please wait...'.format(
        #            datetime.datetime.now(),
        #            cmd[4],
        #            server))
        #    subprocess.call(cmd)
        if conf['ipxe']:
            cmd[4] = build['archboot']
            print('{0}: [RSYNC] {1} => {2}...'.format(
                    datetime.datetime.now(),
                    cmd[4],
                    server))
            subprocess.call(cmd)
            cmd[4] = '{0}/boot'.format(build['prepdir'])
            subprocess.call(cmd)
        if conf['rsync']['iso']:
            cmd[4] = isodir
            print('{0}: [RSYNC] {1} => {2}...'.format(
                    datetime.datetime.now(),
                    cmd[4],
                    server))
            subprocess.call(cmd)
        # Now we copy some extra files.
        prebuild_dir = '{0}/extra/pre-build.d'.format(build['basedir'])
        rsync_files = ['{0}/VERSION_INFO.txt'.format(prepdir),
                       '{0}/root/packages.both'.format(prebuild_dir),
                       '{0}/root/iso.pkgs.both'.format(prebuild_dir)]
        for x in rsync_files:
            cmd[4] = x
            subprocess.call(cmd)
        # And we grab the remaining, since we need to rename them.
        for a in arch:
            cmd[4] = '{0}/{1}/root/packages.arch'.format(prebuild_dir, a)
            cmd[5] = '{0}@{1}:{2}/packages.{3}'.format(user, server, path, a)
            subprocess.call(cmd)
            cmd[4] = '{0}/{1}/root/iso.pkgs.arch'.format(prebuild_dir, a)
            cmd[5] = '{0}@{1}:{2}/iso.pkgs.{3}'.format(user, server, path, a)
            subprocess.call(cmd)
416
bdisk/build.py
@ -1,416 +0,0 @@
import os
import tarfile
import shutil
import glob
import subprocess
import hashlib
import psutil
import jinja2
import humanize
import datetime
import bGPG  # bdisk.bGPG
from urllib.request import urlopen


def genImg(conf):
    bdisk = conf['bdisk']
    build = conf['build']
    arch = build['arch']
    chrootdir = build['chrootdir']
    archboot = build['archboot']
    basedir = build['basedir']
    prepdir = build['prepdir']
    hashes = {}
    hashes['sha512'] = {}
    hashes['sha256'] = {}
    hashes['md5'] = {}
    squashfses = []
    for a in arch:
        if a == 'i686':
            bitness = '32'
        elif a == 'x86_64':
            bitness = '64'
        # Create the squashfs image
        airoot = archboot + '/' + a + '/'
        squashimg = airoot + 'airootfs.sfs'
        os.makedirs(airoot, exist_ok = True)
        print("{0}: [BUILD] Squashing filesystem ({1})...".format(
                datetime.datetime.now(),
                chrootdir + '/root.' + a))
        # TODO: use stdout and -progress if debugging is enabled. the below subprocess.call() just redirects to
        # /dev/null.
        DEVNULL = open(os.devnull, 'w')
        cmd = ['/usr/bin/mksquashfs',
               chrootdir + '/root.' + a,
               squashimg,
               '-no-progress',
               '-noappend',
               '-comp', 'xz']
        subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
        print("{0}: [BUILD] Generated {1} ({2}).".format(
                datetime.datetime.now(),
                squashimg,
                humanize.naturalsize(
                    os.path.getsize(squashimg))))
        # Generate the checksum files
        print("{0}: [BUILD] Generating SHA512, SHA256, MD5 checksums ({1})...".format(
                datetime.datetime.now(),
                squashimg))
        hashes['sha512'][a] = hashlib.sha512()
        hashes['sha256'][a] = hashlib.sha256()
        hashes['md5'][a] = hashlib.md5()
        with open(squashimg, 'rb') as f:
            while True:
                stream = f.read(65536)  # 64kb chunks
                if not stream:
                    break
                # NOTE: these items are hashlib objects, NOT strings!
                hashes['sha512'][a].update(stream)
                hashes['sha256'][a].update(stream)
                hashes['md5'][a].update(stream)
        with open(airoot + 'airootfs.sha512', 'w+') as f:
            f.write("{0} airootfs.sfs\n".format(hashes['sha512'][a].hexdigest()))
        with open(airoot + 'airootfs.sha256', 'w+') as f:
            f.write("{0} airootfs.sfs\n".format(hashes['sha256'][a].hexdigest()))
        with open(airoot + 'airootfs.md5', 'w+') as f:
            f.write("{0} airootfs.sfs\n".format(hashes['md5'][a].hexdigest()))
        squashfses.append('{0}'.format(squashimg))
        print("{0}: [BUILD] Hash checksums complete.".format(datetime.datetime.now()))
|
|
||||||
# Logo
|
|
||||||
os.makedirs(prepdir + '/boot', exist_ok = True)
|
|
||||||
if not os.path.isfile('{0}/extra/{1}.png'.format(basedir, bdisk['uxname'])):
|
|
||||||
shutil.copy2(basedir + '/extra/bdisk.png', '{0}/{1}.png'.format(prepdir, bdisk['uxname']))
|
|
||||||
else:
|
|
||||||
shutil.copy2(basedir + '/extra/{0}.png'.format(bdisk['uxname']), '{0}/{1}.png'.format(prepdir, bdisk['uxname']))
|
|
||||||
# Kernels, initrds...
|
|
||||||
# We use a dict here so we can use the right filenames...
|
|
||||||
# I might change how I handle this in the future.
|
|
||||||
bootfiles = {}
|
|
||||||
#bootfiles['kernel'] = ['vmlinuz-linux-' + bdisk['name'], '{0}.{1}.kern'.format(bdisk['uxname'], bitness)]
|
|
||||||
bootfiles['kernel'] = ['vmlinuz-linux', '{0}.{1}.kern'.format(bdisk['uxname'], bitness)]
|
|
||||||
#bootfiles['initrd'] = ['initramfs-linux-{0}.img'.format(bdisk['name']), '{0}.{1}.img'.format(bdisk['uxname'], bitness)]
|
|
||||||
bootfiles['initrd'] = ['initramfs-linux.img', '{0}.{1}.img'.format(bdisk['uxname'], bitness)]
|
|
||||||
for x in ('kernel', 'initrd'):
|
|
||||||
shutil.copy2('{0}/root.{1}/boot/{2}'.format(chrootdir, a, bootfiles[x][0]), '{0}/boot/{1}'.format(prepdir, bootfiles[x][1]))
|
|
||||||
for i in squashfses:
|
|
||||||
bGPG.signIMG(i, conf)
|
|
||||||
|
|
||||||
|
|
||||||
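The checksum loop above reads the squashed image once, in 64 KiB chunks, and feeds every chunk to all three hashlib objects. A generalized sketch of the same single-pass pattern (the helper name and signature are illustrative, not part of BDisk):

import hashlib

def multihash(path, algos = ('sha512', 'sha256', 'md5'), chunksize = 65536):
    # One read pass, several digests: each chunk is fed to every hasher.
    hashers = {algo: hashlib.new(algo) for algo in algos}
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(chunksize)
            if not chunk:
                break
            for h in hashers.values():
                h.update(chunk)
    return({algo: h.hexdigest() for (algo, h) in hashers.items()})

This avoids re-reading a potentially very large squashfs once per algorithm.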
def genUEFI(build, bdisk):
    arch = build['arch']
    # 32-bit EFI implementations are nigh nonexistent,
    # so we don't really need to worry about them.
    # Plus there's always multiarch.
    # I can probably do this better with a dict... TODO.
    if 'x86_64' in arch:
        prepdir = build['prepdir']
        basedir = build['basedir']
        chrootdir = build['chrootdir']
        mountpt = build['mountpt']
        templates_dir = build['basedir'] + '/extra/templates'
        efidir = '{0}/EFI/{1}'.format(prepdir, bdisk['name'])
        os.makedirs(efidir, exist_ok = True)
        efiboot_img = efidir + '/efiboot.img'
        os.makedirs(prepdir + '/EFI/boot', exist_ok = True)
        ## Download the EFI shells if we don't have them.
        # For UEFI 2.3+ (http://sourceforge.net/apps/mediawiki/tianocore/index.php?title=UEFI_Shell)
        if not os.path.isfile(prepdir + '/EFI/shellx64_v2.efi'):
            shell2_path = prepdir + '/EFI/shellx64_v2.efi'
            print("{0}: [BUILD] Warning: You are missing {1}. Fetching...".format(datetime.datetime.now(), shell2_path))
            shell2_url = 'https://raw.githubusercontent.com/tianocore/edk2/master/ShellBinPkg/UefiShell/X64/Shell.efi'
            shell2_fetch = urlopen(shell2_url)
            with open(shell2_path, 'wb+') as dl:
                dl.write(shell2_fetch.read())
            shell2_fetch.close()
        # Shell for older versions (http://sourceforge.net/apps/mediawiki/tianocore/index.php?title=Efi-shell)
        # TODO: is there an Arch package for this? Can we just install that in the chroot and copy the shell binaries?
        if not os.path.isfile(prepdir + '/EFI/shellx64_v1.efi'):
            shell1_path = prepdir + '/EFI/shellx64_v1.efi'
            print("{0}: [BUILD] Warning: You are missing {1}. Fetching...".format(datetime.datetime.now(), shell1_path))
            shell1_url = 'https://raw.githubusercontent.com/tianocore/edk2/master/EdkShellBinPkg/FullShell/X64/Shell_Full.efi'
            shell1_fetch = urlopen(shell1_url)
            with open(shell1_path, 'wb+') as dl:
                dl.write(shell1_fetch.read())
            shell1_fetch.close()
        print("{0}: [BUILD] Building UEFI support...".format(datetime.datetime.now()))
        ## But wait! That's not all! We need more binaries.
        # These are in the "efitools" package now.
        for f in ('PreLoader.efi', 'HashTool.efi'):
            if f == 'PreLoader.efi':
                fname = 'bootx64.efi'
            else:
                fname = f
            with open('{0}/root.x86_64/usr/share/efitools/efi/{1}'.format(
                                        chrootdir,
                                        f),
                      'rb') as r:
                with open('{0}/EFI/boot/{1}'.format(prepdir, fname), 'wb') as file:
                    file.write(r.read())
        # And we also need the systemd EFI bootloader.
        if os.path.isfile(prepdir + '/EFI/boot/loader.efi'):
            os.remove(prepdir + '/EFI/boot/loader.efi')
        with open('{0}/root.x86_64/usr/lib/systemd/boot/efi/systemd-bootx64.efi'.format(
                                        chrootdir),
                  'rb') as r:
            with open('{0}/EFI/boot/loader.efi'.format(prepdir), 'wb') as file:
                file.write(r.read())
        # And the accompanying configs for the systemd EFI bootloader, too.
        tpl_loader = jinja2.FileSystemLoader(templates_dir)
        env = jinja2.Environment(loader = tpl_loader)
        os.makedirs(prepdir + '/loader/entries', exist_ok = True)
        for t in ('loader', 'ram', 'base', 'uefi2', 'uefi1'):
            if t == 'base':
                fname = bdisk['uxname'] + '.conf'
            elif t not in ('uefi1', 'uefi2'):
                fname = t + '.conf'
            else:
                fname = bdisk['uxname'] + '_' + t + '.conf'
            if t == 'loader':
                tplpath = prepdir + '/loader/'
                fname = 'loader.conf'  # we change the var from above because it's an oddball.
            else:
                tplpath = prepdir + '/loader/entries/'
            tpl = env.get_template('EFI/' + t + '.conf.j2')
            tpl_out = tpl.render(build = build, bdisk = bdisk)
            with open(tplpath + fname, "w+") as f:
                f.write(tpl_out)
        # And we need to get filesizes (in bytes) for everything we need to include in the ESP.
        # This is more important than it looks.
        sizetotal = 33553920  # The spec'd EFI binary size (32MB). It's okay to go over this, though (and we do),
                              # because xorriso sees it as a filesystem image and adjusts the ISO automagically.
        #sizetotal = 2097152  # we start with 2MB and add to it for wiggle room
        sizefiles = ['/boot/' + bdisk['uxname'] + '.64.img',
                     '/boot/' + bdisk['uxname'] + '.64.kern',
                     '/EFI/boot/bootx64.efi',
                     '/EFI/boot/loader.efi',
                     '/EFI/boot/HashTool.efi',
                     '/EFI/shellx64_v1.efi',
                     '/EFI/shellx64_v2.efi']
        for i in sizefiles:
            sizetotal += os.path.getsize(prepdir + i)
        # Loader configs
        for (path, dirs, files) in os.walk(prepdir + '/loader/'):
            for file in files:
                fname = os.path.join(path, file)
                sizetotal += os.path.getsize(fname)
        # And now we create the EFI binary filesystem image...
        print("{0}: [BUILD] Creating EFI ESP image {2} ({1})...".format(
                                datetime.datetime.now(),
                                humanize.naturalsize(sizetotal),
                                efiboot_img))
        if os.path.isfile(efiboot_img):
            os.remove(efiboot_img)
        with open(efiboot_img, 'wb+') as f:
            f.truncate(sizetotal)
        DEVNULL = open(os.devnull, 'w')
        cmd = ['/sbin/mkfs.fat', '-F', '32', '-n', bdisk['name'] + '_EFI', efiboot_img]
        subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
        cmd = ['/bin/mount', efiboot_img, build['mountpt']]
        subprocess.call(cmd)
        os.makedirs('{0}/EFI/{1}'.format(build['mountpt'], bdisk['name']))
        os.makedirs(build['mountpt'] + '/EFI/boot')
        os.makedirs(build['mountpt'] + '/loader/entries')
        # Ready for some deja vu? This is because the ISO uses an embedded copy as well, for hybrid ISO.
        # I think.
        # TODO: just move this to a function instead, with "efi" as a param, and change
        # the templates to use "if efi == 'yes'" instead.
        # The function should set the "installation" path for the conf as well, based on the value of
        # the efi parameter.
        env = jinja2.Environment(loader = tpl_loader)
        for t in ('loader', 'ram', 'base', 'uefi2', 'uefi1'):
            if t == 'base':
                fname = bdisk['uxname'] + '.conf'
            elif t not in ('uefi1', 'uefi2'):
                fname = t + '.conf'
            else:
                fname = bdisk['uxname'] + '_' + t + '.conf'
            if t == 'loader':
                tplpath = build['mountpt'] + '/loader/'
                fname = 'loader.conf'  # we change the var from above because it's an oddball.
            else:
                tplpath = build['mountpt'] + '/loader/entries/'
            tpl = env.get_template('EFI/' + t + '.conf.j2')
            tpl_out = tpl.render(build = build, bdisk = bdisk, efi = 'yes')
            with open(tplpath + fname, "w+") as f:
                f.write(tpl_out)
        for x in ('bootx64.efi', 'HashTool.efi', 'loader.efi'):
            y = prepdir + '/EFI/boot/' + x
            z = mountpt + '/EFI/boot/' + x
            if os.path.isfile(z):
                os.remove(z)
            shutil.copy(y, z)
        for x in ('shellx64_v1.efi', 'shellx64_v2.efi'):
            y = prepdir + '/EFI/' + x
            z = mountpt + '/EFI/' + x
            if os.path.isfile(z):
                os.remove(z)
            shutil.copy(y, z)
        shutil.copy2('{0}/root.{1}/boot/vmlinuz-linux'.format(chrootdir, 'x86_64'),
                     '{0}/EFI/{1}/{2}.efi'.format(mountpt, bdisk['name'], bdisk['uxname']))
        shutil.copy2('{0}/root.{1}/boot/initramfs-linux.img'.format(chrootdir, 'x86_64'),
                     '{0}/EFI/{1}/{2}.img'.format(mountpt, bdisk['name'], bdisk['uxname']))
        # TODO: support both arches as EFI-bootable instead? Maybe? Requires more research; very rare.
        #shutil.copy2('{0}/root.{1}/boot/vmlinuz-linux-{2}'.format(chrootdir, a, bdisk['name']),
        #             '{0}/EFI/{1}/{2}.{3}.efi'.format(mountpt, bdisk['name'], bdisk['uxname'], bitness))
        #shutil.copy2('{0}/root.{1}/boot/initramfs-linux-{2}.img'.format(chrootdir, a, bdisk['uxname']),
        #             '{0}/EFI/{1}/{2}.{3}.img'.format(mountpt, bdisk['name'], bdisk['uxname'], bitness))
        cmd = ['/bin/umount', mountpt]
        subprocess.call(cmd)
        efisize = humanize.naturalsize(os.path.getsize(efiboot_img))
        print('{0}: [BUILD] Built EFI binary ({1}).'.format(datetime.datetime.now(), efisize))
        return(efiboot_img)

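The ESP built above is just a file-backed FAT32 filesystem: the file is truncate()d out to the computed size (producing a sparse file), formatted, mounted, and populated. A standalone sketch of the sizing-and-formatting step (the helper name and the bare mkfs invocation are illustrative, not BDisk's exact interface):

import os
import subprocess

def make_esp_image(img_path, payload_files, floor = 33553920):
    # Start from the spec'd 32MB floor and grow by each payload file's size;
    # xorriso treats the result as a filesystem image, so overshooting is fine.
    size = floor
    for f in payload_files:
        size += os.path.getsize(f)
    with open(img_path, 'wb+') as img:
        img.truncate(size)  # sparse file of the final length
    subprocess.call(['/sbin/mkfs.fat', '-F', '32', img_path])
    return(img_path)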
def genISO(conf):
    build = conf['build']
    bdisk = conf['bdisk']
    archboot = build['archboot']
    prepdir = build['prepdir']
    templates_dir = build['basedir'] + '/extra/templates'
    arch = build['arch']
    builddir = prepdir + '/' + bdisk['name']
    extradir = build['basedir'] + '/extra/'
    # arch[0] is safe to use, even if multiarch, because the only case where it'd be ambiguous
    # is when x86_64 is specifically set to [0]. See host.py's parseConfig().
    # TODO: can we use syslinux for EFI too, instead of PreLoader?
    syslinuxdir = build['chrootdir'] + '/root.' + arch[0] + '/usr/lib/syslinux/bios/'
    sysl_tmp = prepdir + '/isolinux/'
    ver = bdisk['ver']
    if len(arch) == 1:
        isofile = '{0}-{1}-{2}-{3}.iso'.format(bdisk['uxname'], bdisk['ver'], build['buildnum'], arch[0])
    else:
        isofile = '{0}-{1}-{2}.iso'.format(bdisk['uxname'], bdisk['ver'], build['buildnum'])
    isopath = build['isodir'] + '/' + isofile
    # In case we're building a single-arch ISO...
    if len(arch) == 1:
        isolinux_cfg = '/BIOS/isolinux.cfg.arch.j2'
        if arch[0] == 'i686':
            bitness = '32'
            efi = False
        elif arch[0] == 'x86_64':
            bitness = '64'
            efi = True
    else:
        isolinux_cfg = '/BIOS/isolinux.cfg.multi.j2'
        bitness = False
        efi = True
    if os.path.isfile(isopath):
        os.remove(isopath)
    if archboot != prepdir + '/' + bdisk['name']:  # best to use static concat here...
        if os.path.isdir(builddir):
            shutil.rmtree(builddir, ignore_errors = True)
        shutil.copytree(archboot, builddir)
    if build['ipxe']:
        ipxe = conf['ipxe']
        if ipxe['iso']:
            minifile = '{0}-{1}-mini.iso'.format(bdisk['uxname'], bdisk['ver'])
            minipath = build['isodir'] + '/' + minifile
        if ipxe['usb']:
            usbfile = '{0}-{1}-mini.usb.img'.format(bdisk['uxname'], bdisk['ver'])
            minipath = build['isodir'] + '/' + usbfile
    # Copy isolinux files
    print("{0}: [BUILD] Staging ISO preparation...".format(datetime.datetime.now()))
    isolinux_files = ['isolinux.bin',
                      'vesamenu.c32',
                      'linux.c32',
                      'reboot.c32']
    # TODO: implement debugging mode in bdisk
    #if debug:
    #    isolinux_files[0] = 'isolinux-debug.bin'
    os.makedirs(sysl_tmp, exist_ok = True)
    for f in isolinux_files:
        if os.path.isfile(sysl_tmp + f):
            os.remove(sysl_tmp + f)
        shutil.copy2(syslinuxdir + f, sysl_tmp + f)
    ifisolinux_files = ['ldlinux.c32',
                        'libcom32.c32',
                        'libutil.c32',
                        'ifcpu64.c32']
    for f in ifisolinux_files:
        if os.path.isfile(sysl_tmp + f):
            os.remove(sysl_tmp + f)
        shutil.copy2(syslinuxdir + f, sysl_tmp + f)
    tpl_loader = jinja2.FileSystemLoader(templates_dir)
    env = jinja2.Environment(loader = tpl_loader)
    tpl = env.get_template(isolinux_cfg)
    tpl_out = tpl.render(build = build, bdisk = bdisk, bitness = bitness)
    with open(sysl_tmp + '/isolinux.cfg', "w+") as f:
        f.write(tpl_out)
    # And we need to build the ISO!
    # TODO: only include UEFI support if we actually built it!
    print("{0}: [BUILD] Building full ISO ({1})...".format(datetime.datetime.now(), isopath))
    if efi:
        cmd = ['/usr/bin/xorriso',
               '-as', 'mkisofs',
               '-iso-level', '3',
               '-full-iso9660-filenames',
               '-volid', bdisk['name'],
               '-appid', bdisk['desc'],
               '-publisher', bdisk['dev'],
               '-preparer', 'prepared by ' + bdisk['dev'],
               '-eltorito-boot', 'isolinux/isolinux.bin',
               '-eltorito-catalog', 'isolinux/boot.cat',
               '-no-emul-boot',
               '-boot-load-size', '4',
               '-boot-info-table',
               '-isohybrid-mbr', syslinuxdir + 'isohdpfx.bin',
               '-eltorito-alt-boot',
               '-e', 'EFI/' + bdisk['name'] + '/efiboot.img',
               '-no-emul-boot',
               '-isohybrid-gpt-basdat',
               '-output', isopath,
               prepdir]
    else:
        # UNTESTED. TODO.
        # I think I also want to get rid of: -boot-load-size 4,
        # -boot-info-table, and possibly -isohybrid-gpt-basdat...
        # https://wiki.archlinux.org/index.php/Unified_Extensible_Firmware_Interface#Remove_UEFI_boot_support_from_Optical_Media
        cmd = ['/usr/bin/xorriso',
               '-as', 'mkisofs',
               '-iso-level', '3',
               '-full-iso9660-filenames',
               '-volid', bdisk['name'],
               '-appid', bdisk['desc'],
               '-publisher', bdisk['dev'],
               '-preparer', 'prepared by ' + bdisk['dev'],
               '-eltorito-boot', 'isolinux/isolinux.bin',
               '-eltorito-catalog', 'isolinux/boot.cat',
               '-no-emul-boot',
               '-boot-load-size', '4',
               '-boot-info-table',
               '-isohybrid-mbr', syslinuxdir + 'isohdpfx.bin',
               '-no-emul-boot',
               '-isohybrid-gpt-basdat',
               '-output', isopath,
               prepdir]
    DEVNULL = open(os.devnull, 'w')
    subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
    # Get the size and checksum of the ISO
    iso = {}
    iso['name'] = ['Main']
    iso['Main'] = {}
    iso['Main']['sha'] = hashlib.sha256()
    with open(isopath, 'rb') as f:
        while True:
            stream = f.read(65536)  # 64 KiB chunks
            if not stream:
                break
            iso['Main']['sha'].update(stream)
    iso['Main']['sha'] = iso['Main']['sha'].hexdigest()
    iso['Main']['file'] = isopath
    iso['Main']['size'] = humanize.naturalsize(os.path.getsize(isopath))
    iso['Main']['type'] = 'Full'
    iso['Main']['fmt'] = 'Hybrid ISO'
    return(iso)


def displayStats(iso):
    for i in iso['name']:
        print("{0}: == {1} {2} ==".format(datetime.datetime.now(), iso[i]['type'], iso[i]['fmt']))
        print('\t\t\t = Size: {0}'.format(iso[i]['size']))
        print('\t\t\t = SHA256: {0}'.format(iso[i]['sha']))
        print('\t\t\t = Location: {0}'.format(iso[i]['file']))


def cleanUp():
    # TODO: clear out all of prepdir?
    pass

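displayStats() walks the dict genISO() returns: iso['name'] lists the image keys, and each key maps to that image's stats. A minimal sketch of the expected shape (all values here are invented for illustration):

iso = {'name': ['Main'],
       'Main': {'type': 'Full',
                'fmt': 'Hybrid ISO',
                'size': '1.2 GB',
                'sha': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
                'file': '/var/tmp/bdisk/iso/bdisk-1.0-0-x86_64.iso'}}
displayStats(iso)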
3  bdisk/chroot.py  Normal file
@@ -0,0 +1,3 @@
import os
import psutil
import subprocess
1030  bdisk/confgen.py  Executable file
File diff suppressed because it is too large
393  bdisk/confparse.py  Normal file
@@ -0,0 +1,393 @@
import copy
import os
import pprint
import re
import lxml.etree
from urllib.parse import urlparse
import utils  # LOCAL


etree = lxml.etree
detect = utils.detect()
generate = utils.generate()
transform = utils.transform()
valid = utils.valid()


class Conf(object):
    def __init__(self, cfg, profile = None, validate_cfg = False,
                 xsd_file = None):
        """
        A configuration object.

        Read a configuration file, parse it, and make it available to the rest
        of BDisk.

        Args:

            cfg         The configuration. Can be a filesystem path, a string,
                        bytes, or a stream. If bytes or a bytestream, it must
                        be in UTF-8 format.

            profile     (optional) A sub-profile in the configuration. If None
                        is provided, we'll first look for the first profile
                        named 'default' (case-insensitive). If one isn't
                        found, then the first profile found will be used. Can
                        be a string (in which case we'll automatically search
                        for the given value in the "name" attribute) or a dict
                        for more fine-grained profile identification, such as:

                        {'name': 'PROFILE_NAME',
                         'id': 1,
                         'uuid': '00000000-0000-0000-0000-000000000000'}

                        You can provide any combination of these
                        (e.g. "profile={'id': 2, 'name': 'some_profile'}").
                        Matching is non-greedy, meaning ALL attributes
                        specified must match.
        """
        if validate_cfg == 'pre':
            # Validate before attempting any other operations
            self.validate()
        self.xml_suppl = utils.xml_supplicant(cfg, profile = profile)
        self.xml = self.xml_suppl.xml
        for e in self.xml_suppl.xml.iter():
            self.xml_suppl.substitute(e)
        self.xml_suppl.get_profile(profile = self.xml_suppl.orig_profile)
        with open('/tmp/parsed.xml', 'wb') as f:
            f.write(lxml.etree.tostring(self.xml_suppl.xml))
        self.profile = self.xml_suppl.profile
        self.xsd = xsd_file
        self.cfg = {}
        if validate_cfg:
            # Validation post-substitution
            self.validate(parsed = False)
        # TODO: populate checksum{} with hash_algo if explicit

    def get_pki_obj(self, pki, pki_type):
        elem = {}
        if pki_type not in ('ca', 'client'):
            raise ValueError('pki_type must be "ca" or "client"')
        if pki_type == 'ca':
            elem['index'] = None
            elem['serial'] = None
        for e in pki.xpath('./*'):
            # These have attribs or children.
            if e.tag in ('cert', 'key', 'subject'):
                elem[e.tag] = {}
                if e.tag == 'subject':
                    for sub in e.xpath('./*'):
                        elem[e.tag][sub.tag] = transform.xml2py(sub.text,
                                                                attrib = False)
                else:
                    for a in e.xpath('./@*'):
                        elem[e.tag][a.attrname] = transform.xml2py(a)
                    elem[e.tag]['path'] = e.text
            else:
                elem[e.tag] = e.text
        return(elem)

    def get_source(self, source, item, _source):
        _source_item = {'flags': [], 'fname': None}
        elem = source.xpath('./{0}'.format(item))[0]
        if item == 'checksum':
            if elem.get('explicit', False):
                _explicit = transform.xml2py(
                                        elem.attrib['explicit'])
                _source_item['explicit'] = _explicit
                if _explicit:
                    del(_source_item['fname'])
                    _source_item['value'] = elem.text
                    return(_source_item)
            else:
                _source_item['explicit'] = False
            if elem.get('hash_algo', False):
                _source_item['hash_algo'] = elem.attrib['hash_algo']
            else:
                _source_item['hash_algo'] = None
        if item == 'sig':
            if elem.get('keys', False):
                _keys = [i.strip() for i in elem.attrib['keys'].split()]
                _source_item['keys'] = _keys
            else:
                _source_item['keys'] = []
            if elem.get('keyserver', False):
                _source_item['keyserver'] = elem.attrib['keyserver']
            else:
                _source_item['keyserver'] = None
        _item = elem.text
        _flags = elem.get('flags', '')
        if _flags:
            for f in _flags.split():
                if f.strip().lower() == 'none':
                    continue
                _source_item['flags'].append(f.strip().lower())
        if _source_item['flags']:
            if 'regex' in _source_item['flags']:
                ptrn = _item.format(**self.xml_suppl.btags['regex'])
            else:
                ptrn = None
            # TODO: remove all of this; we're switching to just a mirror URL.
            _source_item['fname'] = detect.remote_files(
                                    '/'.join((_source['mirror'],
                                              _source['rootpath'])),
                                    ptrn = ptrn,
                                    flags = _source_item['flags'])
        else:
            _source_item['fname'] = _item
        return(_source_item)

    def get_xsd(self):
        if isinstance(self.xsd, lxml.etree.XMLSchema):
            return(self.xsd)
        if not self.xsd:
            path = os.path.join(os.path.dirname(__file__), 'bdisk.xsd')
        else:
            path = os.path.abspath(os.path.expanduser(self.xsd))
        with open(path, 'rb') as f:
            xsd = lxml.etree.parse(f)
        return(xsd)

    def parse_accounts(self):
        ## PROFILE/ACCOUNTS
        self.cfg['users'] = []
        # First we handle the root user, since it's a "special" case.
        _root = self.profile.xpath('./accounts/rootpass')
        self.cfg['root'] = transform.user(_root)
        for user in self.profile.xpath('./accounts/user'):
            _user = {'username': user.xpath('./username/text()')[0],
                     'sudo': transform.xml2py(user.attrib['sudo']),
                     'comment': None}
            _comment = user.xpath('./comment/text()')
            if len(_comment):
                _user['comment'] = _comment[0]
            _password = user.xpath('./password')
            _user.update(transform.user(_password))
            self.cfg['users'].append(_user)
        return()

    def parse_all(self):
        self.parse_profile()
        self.parse_meta()
        self.parse_accounts()
        self.parse_sources()
        self.parse_buildpaths()
        self.parse_pki()
        self.parse_gpg()
        self.parse_sync()
        return()

    def parse_buildpaths(self):
        ## PROFILE/BUILD(/PATHS)
        self.cfg['build'] = {'paths': {}}
        build = self.profile.xpath('./build')[0]
        _optimize = build.get('its_full_of_stars', 'false')
        self.cfg['build']['optimize'] = transform.xml2py(_optimize)
        for path in build.xpath('./paths/*'):
            self.cfg['build']['paths'][path.tag] = path.text
        self.cfg['build']['guests'] = build.get('guests', 'archlinux')
        # iso and ipxe are their own basic profile elements, but we group them
        # in here because 1.) they're related, and 2.) they're simple to
        # import. This may change in the future if they become more complex.
        ## PROFILE/ISO
        self.cfg['iso'] = {'sign': None,
                           'multi_arch': None}
        self.cfg['ipxe'] = {'sign': None,
                            'iso': None}
        for x in ('iso', 'ipxe'):
            # We enable all features by default.
            elem = self.profile.xpath('./{0}'.format(x))[0]
            for a in self.cfg[x]:
                self.cfg[x][a] = transform.xml2py(elem.get(a, 'true'))
            if x == 'ipxe':
                self.cfg[x]['uri'] = elem.xpath('./uri/text()')[0]
        return()

    def parse_gpg(self):
        ## PROFILE/GPG
        self.cfg['gpg'] = {'keyid': None,
                           'gnupghome': None,
                           'publish': None,
                           'prompt_passphrase': None,
                           'keys': []}
        elem = self.profile.xpath('./gpg')[0]
        for attr in elem.xpath('./@*'):
            self.cfg['gpg'][attr.attrname] = transform.xml2py(attr)
        for key in elem.xpath('./key'):
            _keytpl = {'algo': 'rsa',
                       'keysize': '4096'}
            _key = copy.deepcopy(_keytpl)
            _key['name'] = None
            _key['email'] = None
            _key['comment'] = None
            for attr in key.xpath('./@*'):
                _key[attr.attrname] = transform.xml2py(attr)
            for param in key.xpath('./*'):
                if param.tag == 'subkey':
                    # We only support one subkey (for key generation).
                    if 'subkey' not in _key:
                        _key['subkey'] = copy.deepcopy(_keytpl)
                    for attr in param.xpath('./@*'):
                        _key['subkey'][attr.attrname] = transform.xml2py(attr)
                else:
                    _key[param.tag] = transform.xml2py(param.text, attrib = False)
            self.cfg['gpg']['keys'].append(_key)
        return()

    def parse_meta(self):
        ## PROFILE/META
        # Get the various meta strings. We skip regexes (we handle those
        # separately, since they're unique'd per id attrib) and variables
        # (they are already substituted by self.xml_suppl.substitute(x)).
        _meta_iters = ('dev', 'names')
        for t in _meta_iters:
            self.cfg[t] = {}
            _xpath = './meta/{0}'.format(t)
            for e in self.profile.xpath(_xpath):
                for se in e:
                    if not isinstance(se, lxml.etree._Comment):
                        self.cfg[t][se.tag] = transform.xml2py(se.text,
                                                               attrib = False)
        for e in ('desc', 'uri', 'ver', 'max_recurse'):
            _xpath = './meta/{0}/text()'.format(e)
            self.cfg[e] = transform.xml2py(self.profile.xpath(_xpath)[0],
                                           attrib = False)
        # HERE is where we would handle regex patterns,
        # but we don't, because they're in self.xml_suppl.btags['regex'].
        #self.cfg['regexes'] = {}
        #_regexes = self.profile.xpath('./meta/regexes/pattern')
        #if len(_regexes):
        #    for ptrn in _regexes:
        #        self.cfg['regexes'][ptrn.attrib['id']] = re.compile(ptrn.text)
        return()

    def parse_pki(self):
        ## PROFILE/PKI
        self.cfg['pki'] = {'clients': []}
        elem = self.profile.xpath('./pki')[0]
        self.cfg['pki']['overwrite'] = transform.xml2py(
                                        elem.get('overwrite', 'false'))
        ca = elem.xpath('./ca')[0]
        clients = elem.xpath('./client')
        self.cfg['pki']['ca'] = self.get_pki_obj(ca, 'ca')
        for client in clients:
            self.cfg['pki']['clients'].append(self.get_pki_obj(client,
                                                               'client'))
        return()

    def parse_profile(self):
        ## PROFILE
        # The following are attributes of profiles that serve as identifiers.
        self.cfg['profile'] = {'id': None,
                               'name': None,
                               'uuid': None}
        for a in self.cfg['profile']:
            if a in self.profile.attrib:
                self.cfg['profile'][a] = transform.xml2py(
                                            self.profile.attrib[a],
                                            attrib = True)
        # Small bug in transform.xml2py that we unfortunately can't fix there, so we fix it manually:
        if 'id' in self.cfg['profile'] and isinstance(self.cfg['profile']['id'], bool):
            self.cfg['profile']['id'] = int(self.cfg['profile']['id'])
        return()

    def parse_sources(self):
        ## PROFILE/SOURCES
        self.cfg['sources'] = []
        for source in self.profile.xpath('./sources/source'):
            _source = {}
            _source['arch'] = source.attrib['arch']
            _source['mirror'] = source.xpath('./mirror/text()')[0]
            _source['rootpath'] = source.xpath('./rootpath/text()')[0]
            # The tarball, checksum, and sig components require some...
            # special care.
            for e in ('tarball', 'checksum', 'sig'):
                _source[e] = self.get_source(source, e, _source)
            self.cfg['sources'].append(_source)
        return()

    def parse_sync(self):
        ## PROFILE/SYNC
        self.cfg['sync'] = {}
        elem = self.profile.xpath('./sync')[0]
        # We populate defaults in case they weren't specified.
        for e in ('gpg', 'ipxe', 'iso', 'tftp'):
            self.cfg['sync'][e] = {'enabled': False,
                                   'path': None}
            sub = elem.xpath('./{0}'.format(e))[0]
            for a in sub.xpath('./@*'):
                self.cfg['sync'][e][a.attrname] = transform.xml2py(a)
            self.cfg['sync'][e]['path'] = sub.text
        rsync = elem.xpath('./rsync')[0]
        self.cfg['sync']['rsync'] = {'enabled': False}
        for a in rsync.xpath('./@*'):
            self.cfg['sync']['rsync'][a.attrname] = transform.xml2py(a)
        for sub in rsync.xpath('./*'):
            self.cfg['sync']['rsync'][sub.tag] = transform.xml2py(
                                                    sub.text,
                                                    attrib = False)
        return()

    def validate(self, parsed = False):
        xsd = self.get_xsd()
        if not isinstance(xsd, lxml.etree.XMLSchema):
            self.xsd = etree.XMLSchema(xsd)
        # This would return a bool for whether it validates or not:
        #self.xsd.validate(self.xml)
        # We want a more detailed exception instead.
        xml = etree.fromstring(self.xml_suppl.return_full())
        self.xsd.assertValid(xml)
        if parsed:
            # We wait until after it's parsed to evaluate, because otherwise
            # we can't use utils.valid().
            # We only bother with stuff that would hinder building, though -
            # e.g. we don't check that the profile's UUID is a valid UUID4.
            # The XSD can catch a lot, but it's not so hot with things like
            # URI validation, email validation, etc.
            # URLs
            for url in (self.cfg['uri'], self.cfg['dev']['website']):
                if not valid.url(url):
                    raise ValueError('{0} is not a valid URL.'.format(url))
            # Emails
            for k in self.cfg['gpg']['keys']:
                if not valid.email(k['email']):
                    raise ValueError('GPG key {0}: {1} is not a valid email address'.format(k['name'], k['email']))
            if not valid.email(self.cfg['dev']['email']):
                raise ValueError('{0} is not a valid email address'.format(self.cfg['dev']['email']))
            if self.cfg['pki']:
                if 'subject' in self.cfg['pki']['ca']:
                    if not valid.email(self.cfg['pki']['ca']['subject']['emailAddress']):
                        raise ValueError('{0} is not a valid email address'.format(
                                            self.cfg['pki']['ca']['subject']['emailAddress']))
                for cert in self.cfg['pki']['clients']:
                    if not cert['subject']:
                        continue
                    if not valid.email(cert['subject']['emailAddress']):
                        raise ValueError('{0} is not a valid email address'.format(cert['subject']['emailAddress']))
            # Salts/hashes
            if self.cfg['root']['salt']:
                if not valid.salt_hash(self.cfg['root']['salt']):
                    raise ValueError('{0} is not a valid salt'.format(self.cfg['root']['salt']))
            if self.cfg['root']['hashed']:
                if not valid.salt_hash_full(self.cfg['root']['salt_hash'], self.cfg['root']['hash_algo']):
                    raise ValueError('{0} is not a valid hash of type {1}'.format(self.cfg['root']['salt_hash'],
                                                                                  self.cfg['root']['hash_algo']))
            for u in self.cfg['users']:
                if u['salt']:
                    if not valid.salt_hash(u['salt']):
                        raise ValueError('{0} is not a valid salt'.format(u['salt']))
                if u['hashed']:
                    if not valid.salt_hash_full(u['salt_hash'], u['hash_algo']):
                        raise ValueError('{0} is not a valid hash of type {1}'.format(u['salt_hash'], u['hash_algo']))
            # GPG Key IDs
            if self.cfg['gpg']['keyid']:
                if not valid.gpgkeyID(self.cfg['gpg']['keyid']):
                    raise ValueError('{0} is not a valid GPG Key ID/fingerprint'.format(self.cfg['gpg']['keyid']))
            for s in self.cfg['sources']:
                if 'sig' in s:
                    for k in s['sig']['keys']:
                        if not valid.gpgkeyID(k):
                            raise ValueError('{0} is not a valid GPG Key ID/fingerprint'.format(k))
        return()
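For reference, a minimal usage sketch of the Conf class above (the config path and profile values are made-up examples, not BDisk defaults):

cfg = Conf('/etc/bdisk/bdisk.xml',
           profile = {'name': 'default'},  # or e.g. {'id': 2}; all given attribs must match
           validate_cfg = True)
cfg.parse_all()
pprint.pprint(cfg.cfg['profile'])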
48  bdisk/download.py  Normal file
@@ -0,0 +1,48 @@
import requests


class Download(object):
    def __init__(self, url, progress = True, offset = None, chunksize = 1024):
        self.cnt_len = None
        self.head = requests.head(url, allow_redirects = True).headers
        self.req_headers = {}
        self.range = False
        self.url = url
        self.offset = offset
        self.chunksize = chunksize
        self.progress = progress
        if 'accept-ranges' in self.head:
            if self.head['accept-ranges'].lower() != 'none':
                self.range = True
        if 'content-length' in self.head:
            try:
                self.cnt_len = int(self.head['content-length'])
            except TypeError:
                pass
        if self.cnt_len and self.offset and self.range:
            if not self.offset <= self.cnt_len:
                raise ValueError(('The offset requested ({0}) is greater than '
                                  'the content-length value ({1})').format(self.offset, self.cnt_len))
            self.req_headers['range'] = 'bytes={0}-'.format(self.offset)

    def fetch(self):
        if not self.progress:
            self.req = requests.get(self.url, allow_redirects = True, headers = self.req_headers)
            self.bytes_obj = self.req.content
        else:
            self.req = requests.get(self.url, allow_redirects = True, stream = True, headers = self.req_headers)
            self.bytes_obj = bytes()
            _bytelen = 0
            # TODO: better handling for logging instead of print()s?
            for chunk in self.req.iter_content(chunk_size = self.chunksize):
                self.bytes_obj += chunk
                if self.cnt_len:
                    print('\033[F')
                    print('{0:.2f}'.format((_bytelen / float(self.head['content-length'])) * 100),
                          end = '%',
                          flush = True)
                    _bytelen += self.chunksize
                else:
                    print('.', end = '')
            print()
        return(self.bytes_obj)
76  bdisk/env_prep.py  Normal file
@@ -0,0 +1,76 @@
import hashlib
import importlib  # needed for the guest-OS-specific stuff...
import os
import download  # LOCAL
import utils  # LOCAL
from urllib.parse import urljoin


def hashsum_downloader(url, filename = None):
    # TODO: support "latest" and "regex" flags? Or remove them from the specs (since the tarball can be
    # specified by these)? Move that to the download.Download() class?
    d = download.Download(url, progress = False)
    hashes = {os.path.basename(k): v for (v, k) in [line.split() for line in d.fetch().decode('utf-8').splitlines()]}
    if filename:
        if filename in hashes:
            return(hashes[filename])
        else:
            raise KeyError('Filename {0} not in the list of hashes'.format(filename))
    return(hashes)


class Prepper(object):
    # Prepare sources, destinations, etc.
    def __init__(self, cfg):
        self.cfg = cfg
        # paths is a dict of name -> path; we want the path values.
        self.CreateDirs(self.cfg['build']['paths'].values())
        if 'handler' not in self.cfg['gpg'] or not self.cfg['gpg']['handler']:
            if self.cfg['gpg']['gnupghome']:
                os.environ['GNUPGHOME'] = self.cfg['gpg']['gnupghome']
            from . import GPG
            self.cfg['gpg']['handler'] = GPG.GPGHandler(gnupg_homedir = self.cfg['gpg']['gnupghome'],
                                                        key_id = self.cfg['gpg']['keyid'])
        self.gpg = self.cfg['gpg']['handler']
        for idx, s in enumerate(self.cfg['sources']):
            self._download(idx)

    def CreateDirs(self, dirs):
        for d in dirs:
            os.makedirs(d, exist_ok = True)
            os.chmod(d, 0o700)
        return()

    def _download(self, source_idx):
        # "do_download" rather than "download", so we don't shadow the download module above.
        do_download = True
        _source = self.cfg['sources'][source_idx]
        _dest_dir = os.path.join(self.cfg['build']['paths']['cache'], str(source_idx))
        _tarball = os.path.join(_dest_dir, _source['tarball']['fname'])
        _remote_dir = urljoin(_source['mirror'], _source['rootpath'])
        _remote_tarball = urljoin(_remote_dir + '/', _source['tarball']['fname'])
        def _hash_verify():  # TODO: move to utils.valid()?
            # Get a checksum.
            if 'checksum' in _source:
                if not _source['checksum']['explicit']:
                    _source['checksum']['value'] = hashsum_downloader(urljoin(_remote_dir + '/',
                                                                              _source['checksum']['fname']))
                if not _source['checksum']['hash_algo']:
                    _source['checksum']['hash_algo'] = utils.detect().any_hash(_source['checksum']['value'],
                                                                               normalize = True)[0]
                _hash = hashlib.new(_source['checksum']['hash_algo'])
                with open(_tarball, 'rb') as f:
                    # It's potentially a large file, so we chunk it 64 KB at a time.
                    _hashbuf = f.read(64000)
                    while len(_hashbuf) > 0:
                        _hash.update(_hashbuf)
                        _hashbuf = f.read(64000)
                if _hash.hexdigest().lower() != _source['checksum']['value'].lower():
                    return(False)
            return(True)
        def _sig_verify():  # TODO: move to utils.valid()?
            if 'sig' in _source:
                pass
            return(True)
        if os.path.isfile(_tarball):
            # Only (re-)download if an existing tarball fails verification.
            do_download = not (_hash_verify() and _sig_verify())
        if do_download:
            d = download.Download(_remote_tarball)
1  bdisk/guests/antergos.py  Symbolic link
@@ -0,0 +1 @@
archlinux.py
1  bdisk/guests/arch.py  Symbolic link
@@ -0,0 +1 @@
archlinux.py
128  bdisk/guests/archlinux.py  Normal file
@@ -0,0 +1,128 @@
#!/usr/bin/env python3

import os
from .. import utils  # LOCAL # do I need to escalate two levels up?


class Manifest(object):
    def __init__(self, cfg):
        self.cfg = cfg
        self.name = 'archlinux'
        self.version = None  # rolling release
        self.release = None  # rolling release
        # https://www.archlinux.org/master-keys/
        # Pierre Schmitz. https://www.archlinux.org/people/developers/#pierre
        self.gpg_authorities = ['4AA4767BBC9C4B1D18AE28B77F2D434B9741E8AC']
        self.tarball = None
        self.sig = None
        self.mirror = None
        self.checksum = {'sha1': None,
                         'md5': None}
        self.verified = False
        self.arches = ('x86_64', )
        self.bootsupport = ('uefi', 'bios', 'pxe', 'ipxe', 'iso')
        self.kernel = '/boot/vmlinuz-linux'
        self.initrd = '/boot/initramfs-linux.img'
        # TODO: can this be trimmed down?
        self.prereqs = ['arch-install-scripts', 'archiso', 'bzip2', 'coreutils', 'customizepkg-scripting', 'cronie',
                        'dhclient', 'dhcp', 'dhcpcd', 'dosfstools', 'dropbear', 'efibootmgr', 'efitools', 'efivar',
                        'file', 'findutils', 'iproute2', 'iputils', 'libisoburn', 'localepurge', 'lz4', 'lzo',
                        'lzop', 'mkinitcpio-nbd', 'mkinitcpio-nfs-utils', 'mkinitcpio-utils', 'nbd', 'ms-sys',
                        'mtools', 'net-tools', 'netctl', 'networkmanager', 'pv', 'python', 'python-pyroute2',
                        'rsync', 'sed', 'shorewall', 'squashfs-tools', 'sudo', 'sysfsutils', 'syslinux',
                        'traceroute', 'vi']
        self._get_filenames()

    def _get_filenames(self):
        # TODO: cache this info
        webroot = 'iso/latest'
        for m in self.cfg['mirrors']:
            uri = os.path.join(m, webroot)
            try:
                self.tarball = utils.detect().remote_files(uri, regex = (r'archlinux-'
                                                                         r'bootstrap-'
                                                                         r'[0-9]{4}\.'
                                                                         r'[0-9]{2}\.'
                                                                         r'[0-9]{2}-'
                                                                         r'x86_64\.tar\.gz$'))[0]
                self.sig = '{0}.sig'.format(self.tarball)
                for h in self.checksum:
                    self.checksum[h] = os.path.join(uri, '{0}sums.txt'.format(h))
                self.mirror = m
                break
            except Exception:
                pass
        if not self.tarball:
            raise ValueError('Could not find the tarball URI. Check your network connection.')
        return()


def extern_prep(cfg, cur_arch = 'x86_64'):
    import os
    import re
    mirrorlist = os.path.join(cfg['build']['paths']['chroot'],
                              cur_arch,
                              'etc/pacman.d/mirrorlist')
    with open(mirrorlist, 'r') as f:
        mirrors = []
        for i in f.readlines():
            m = re.sub(r'^\s*#.*$', '', i.strip())
            if m != '':
                mirrors.append(m)
    if not mirrors:
        # We do this as a fail-safe.
        mirror = ('\n\n# Added by BDisk\n'
                  'Server = https://arch.mirror.square-r00t.net/'
                  '$repo/os/$arch\n')
        with open(mirrorlist, 'a') as f:
            f.write(mirror)
    return()


# This will be run before the regular packages are installed. It can be
# whatever script you like, as long as it has the proper shebang and doesn't
# need additional packages installed.
# In Arch's case, we use it for initializing the keyring and installing an AUR
# helper.
pkg_mgr_prep = """#!/bin/bash

pacman -Syy
pacman-key --init
pacman-key --populate archlinux
pacman -S --noconfirm --needed base
pacman -S --noconfirm --needed base-devel multilib-devel git linux-headers \
                                mercurial subversion vala xorg-server-devel
cd /tmp
sqrt="https://git.square-r00t.net/BDisk/plain/external"
# Temporary until there's another AUR helper that allows dropping privs AND
# automatically importing GPG keys.
pkg="${sqrt}/apacman-current.pkg.tar.xz?h=4.x_rewrite"
curl -sL -o apacman-current.pkg.tar.xz ${pkg}
pacman -U --noconfirm apacman-current.pkg.tar.xz
rm apacman*
"""

# Special values:
# {PACKAGE} = the package name
# {VERSION} = the version specified in the <package version= ...> attribute
# {REPO} = the repository specified in the <package repo= ...> attribute
# If check_cmds need to run before installing, set pre_check to True.
# Return code 0 means the package is installed already; anything else means we
# should try to install it. (A worked example follows the dict below.)
#### AUR SUPPORT ####
packager = {'pre_check': False,
            'sys_update': ['/usr/bin/apacman', '-S', '-u'],
            'sync_cmd': ['/usr/bin/apacman', '-S', '-y', '-y'],
            'check_cmds': {'versioned': ['/usr/bin/pacman',
                                         '-Q', '-s',
                                         '{PACKAGE}'],
                           'unversioned': ['/usr/bin/pacman',
                                           '-Q', '-s',
                                           '{PACKAGE}']
                           },
            'update_cmds': {'versioned': ['/usr/bin/pacman',
                                          '-S', '-u',
                                          '{PACKAGE}'],
                            'unversioned': ['/usr/bin/pacman',
                                            '-S', '-u',
                                            '{PACKAGE}']
                            },
            }
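A short sketch of how the {PACKAGE}-style placeholders above would be expanded before execution (the helper below is illustrative, not part of BDisk):

import subprocess

def run_pkg_cmd(template, package, version = None, repo = None):
    # Substitute the placeholder tokens in each argv element, then run it.
    cmd = [arg.format(PACKAGE = package, VERSION = version, REPO = repo)
           for arg in template]
    return(subprocess.call(cmd))

# e.g. run_pkg_cmd(packager['check_cmds']['unversioned'], 'openssh') returns 0
# if the package is already installed, per the convention described above.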
1  bdisk/guests/manjaro.py  Symbolic link
@@ -0,0 +1 @@
archlinux.py
194  bdisk/host.py
@@ -1,194 +0,0 @@
import os
import sys
import platform
import re
import glob
import configparser
import validators
import git
import datetime
from socket import getaddrinfo


def getOS():
    # Returns one of: SuSE, debian, fedora, redhat, centos, mandrake,
    # mandriva, rocks, slackware, yellowdog, gentoo, UnitedLinux,
    # turbolinux, arch, mageia
    distro = list(platform.linux_distribution())[0].lower()
    return(distro)


def getBits():
    bits = list(platform.architecture())[0]
    return(bits)


def getHostname():
    hostname = platform.node()
    return(hostname)


def getConfig(conf_file = '/etc/bdisk/build.ini'):
    conf = False
    # Define some default conf paths in case we're installed by
    # a package manager, in the order we should search them.
    currentdir = os.path.dirname(os.path.realpath(__file__))
    currentdir_user = os.path.abspath('{0}/../build.ini'.format(currentdir))
    currentdir_def = os.path.abspath('{0}/../extra/dist.build.ini'.format(currentdir))
    default_conf_paths = ['/etc/bdisk/build.ini',
                          '/usr/share/bdisk/build.ini',
                          '/usr/share/bdisk/extra/build.ini',
                          '/usr/share/docs/bdisk/build.ini',  # this is the preferred installation path for packagers
                          '/usr/local/etc/bdisk/build.ini',
                          '/usr/local/share/docs/bdisk/build.ini',
                          '/opt/dev/bdisk/build.ini',
                          '/opt/dev/bdisk/extra/build.ini',
                          '/opt/dev/bdisk/extra/dist.build.ini',
                          currentdir_user]
    # If we weren't given one/are using the default...
    if conf_file == '/etc/bdisk/build.ini':
        if not os.path.isfile(conf_file):
            for p in default_conf_paths:
                if os.path.isfile(p):
                    conf = p
                    break
        else:
            conf = conf_file
    else:
        conf = conf_file
    defconf = os.path.abspath('{0}/../extra/dist.build.ini'.format(os.path.dirname(os.path.realpath(__file__))))
    if not conf:
        # Okay, so let's check for distributed/"blank" INIs,
        # since we can't seem to find one.
        dist_conf_paths = [re.sub(r'(build\.ini)', 'dist.\\1', s) for s in default_conf_paths]
        for q in dist_conf_paths:
            if os.path.isfile(q):
                conf = q
                break
    if os.path.isfile(default_conf_paths[4]):
        defconf = default_conf_paths[4]
    confs = [defconf, conf]
    return(confs)


def parseConfig(confs):
    config = configparser.ConfigParser()
    config._interpolation = configparser.ExtendedInterpolation()
    config.read(confs)
    # A dict makes this so much easier.
    config_dict = {s: dict(config.items(s)) for s in config.sections()}
    # Convert the booleans to pythonic booleans in the dict...
    config_dict['bdisk']['user'] = config['bdisk'].getboolean('user')
    config_dict['build']['i_am_a_racecar'] = config['build'].getboolean('i_am_a_racecar')
    config_dict['build']['ipxe'] = config['build'].getboolean('ipxe')
    config_dict['build']['sign'] = config['build'].getboolean('sign')
    config_dict['build']['multiarch'] = (config_dict['build']['multiarch']).lower()
    config_dict['ipxe']['iso'] = config['ipxe'].getboolean('iso')
    config_dict['ipxe']['usb'] = config['ipxe'].getboolean('usb')
    config_dict['sync']['git'] = config['sync'].getboolean('git')
    config_dict['sync']['http'] = config['sync'].getboolean('http')
    config_dict['sync']['rsync'] = config['sync'].getboolean('rsync')
    config_dict['sync']['tftp'] = config['sync'].getboolean('tftp')
    config_dict['rsync']['iso'] = config['rsync'].getboolean('iso')
    # Get the version...
    # Two possibilities:
    # e.g. 1 commit after tag with 7-digit object hex: ['v3.10', '1', 'gb4a5e40']
    # Or if we're sitting on a tag with no commits: ['v3.10']
    # So we want our REAL version to be the following:
    # Tagged release:                    v#.##
    # X number of commits after release: v#.##rX
    # Both have the (local) build number appended to the deliverables,
    # which is reset for an empty isodir OR a new tagged release (incl.
    # commits on top of a new tagged release). e.g. for build Y:
    # v#.##-Y or v#.##rX-Y
    # (See the short sketch after this file for the rule in isolation.)
    if config_dict['bdisk']['ver'] == '':
        try:
            repo = git.Repo(config_dict['build']['basedir'])
            refs = repo.git.describe(repo.head.commit).split('-')
            if len(refs) >= 2:
                config_dict['bdisk']['ver'] = refs[0] + 'r' + refs[1]
            else:
                config_dict['bdisk']['ver'] = refs[0]
        except:
            exit(('{0}: ERROR: {1} is NOT a valid git repository, and you did not specify bdisk:ver in your build.ini! ' +
                  'Did you perhaps install from a package manager? Please refer to the documentation.').format(datetime.datetime.now(),
                                                                                                               config_dict['build']['basedir']))
    # And the build number.
    # TODO: support tracking builds per version, i.e. in buildnum:
    # v2.51r13:0
    # v2.51r17:3
    if os.path.isfile(config_dict['build']['dlpath'] + '/buildnum'):
        with open(config_dict['build']['dlpath'] + '/buildnum', 'r') as f:
            config_dict['build']['buildnum'] = int(f.readlines()[0])
    else:
        config_dict['build']['buildnum'] = 0
    # But logically we should start the build over at 0 if we don't have any existing ISOs.
    if os.path.isdir(config_dict['build']['isodir']):
        if os.listdir(config_dict['build']['isodir']) == []:
            config_dict['build']['buildnum'] = 0
        # ...or if we don't have any previous builds for this ISO version.
        elif not glob.glob('{0}/*v{1}r*.iso'.format(config_dict['build']['isodir'], config_dict['bdisk']['ver'])):
            config_dict['build']['buildnum'] = 0
    # And build a list of arch(es) we want to build.
    if config_dict['build']['multiarch'] in ('', 'yes', 'true', '1', 'no', 'false', '0'):
        config_dict['build']['arch'] = ['x86_64', 'i686']
    elif config_dict['build']['multiarch'] in ('x86_64', '64', 'no32'):
        config_dict['build']['arch'] = ['x86_64']
    elif config_dict['build']['multiarch'] in ('i686', '32', 'no64'):
        config_dict['build']['arch'] = ['i686']
    else:
        exit(('{0}: ERROR: {1} is not a valid value. Check your configuration.').format(
                        datetime.datetime.now(),
                        config_dict['build']['multiarch']))
    ## VALIDATORS ##
    # Validate the bootstrap mirror
    config_dict['src'] = {}
    for a in config_dict['build']['arch']:
        config_dict['src'][a] = config_dict['source_' + a]
        if (validators.domain(config_dict['src'][a]['mirror']) or validators.ipv4(
                        config_dict['src'][a]['mirror']) or validators.ipv6(
                        config_dict['src'][a]['mirror'])):
            try:
                getaddrinfo(config_dict['src'][a]['mirror'], None)
            except:
                exit(('{0}: ERROR: {1} does not resolve and cannot be used as a ' +
                      'mirror for the bootstrap tarballs. Check your configuration.').format(
                                        datetime.datetime.now(),
                                        config_dict['src'][a]['host']))
        config_dict['src'][a]['gpg'] = config['source_' + a].getboolean('gpg')
    # Are we rsyncing? If so, validate the rsync host.
    # Works for an IP address too. It does NOT check to see if we can
    # actually *rsync* to it; that'll come later.
    if config_dict['sync']['rsync']:
        if (validators.domain(config_dict['rsync']['host']) or validators.ipv4(
                        config_dict['rsync']['host']) or validators.ipv6(
                        config_dict['rsync']['host'])):
            try:
                getaddrinfo(config_dict['rsync']['host'], None)
            except:
                exit(('{0}: ERROR: {1} does not resolve and cannot be used for rsyncing. ' +
                      'Check your configuration.').format(
                                        datetime.datetime.now(),
                                        config_dict['rsync']['host']))
        else:
            exit(('{0}: ERROR: {1} is not a valid host and cannot be used for rsyncing. ' +
                  'Check your configuration.').format(
                                        datetime.datetime.now(),
                                        config_dict['rsync']['host']))
    # Validate the URI.
    if config_dict['build']['ipxe']:
        # So this won't validate e.g. custom LAN domains (https://pxeserver/bdisk.php). TODO.
        if not validators.url(config_dict['ipxe']['uri']):
            if not re.match('^https?://localhost(/.*)?$', config_dict['ipxe']['uri']):
                exit('{0}: ERROR: {1} is not a valid URL/URI. Check your configuration.'.format(
                                        datetime.datetime.now(),
                                        config_dict['ipxe']['uri']))
    # Validate required paths
    if not os.path.exists(config_dict['build']['basedir'] + '/extra'):
        exit(("{0}: ERROR: {1} does not contain BDisk's core files! " +
              "Check your configuration.").format(
                                        datetime.datetime.now(),
                                        config_dict['build']['basedir']))
    # Make dirs if they don't exist
    for d in ('archboot', 'isodir', 'mountpt', 'srcdir', 'prepdir'):
        os.makedirs(config_dict['build'][d], exist_ok = True)
    # Make dirs for sync staging if we need to
    for x in ('http', 'tftp'):
        if config_dict['sync'][x]:
            os.makedirs(config_dict[x]['path'], exist_ok = True)
    return(config, config_dict)
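The version rule the deleted parseConfig() implemented, isolated into a sketch (the describe strings are the examples from the comments above):

def ver_from_describe(described):
    # 'v3.10-1-gb4a5e40' -> 'v3.10r1'; 'v3.10' -> 'v3.10'
    refs = described.split('-')
    if len(refs) >= 2:
        return(refs[0] + 'r' + refs[1])
    return(refs[0])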
1  bdisk/iPXE.py  Normal file
@@ -0,0 +1 @@
import GIT # LOCAL
304  bdisk/ipxe.py
@@ -1,304 +0,0 @@
import os
import shutil
import re
import subprocess
import jinja2
import git
import patch
import datetime
import humanize
import hashlib


def buildIPXE(conf):
    build = conf['build']
    bdisk = conf['bdisk']
    ipxe = conf['ipxe']
    mini = ipxe['iso']
    prepdir = conf['build']['prepdir']
    templates_dir = build['basedir'] + '/extra/templates'
    ipxe_tpl = templates_dir + '/iPXE'
    srcdir = build['srcdir']
    embedscript = build['dlpath'] + '/EMBED'
    ipxe_src = srcdir + '/ipxe'
    #ipxe_git_uri = 'git://git.ipxe.org/ipxe.git'
    ipxe_git_uri = 'http://git.ipxe.org/ipxe.git'
    print('{0}: [IPXE] Prep/fetch sources...'.format(
                            datetime.datetime.now()))
    # Get the source
    if os.path.isdir(ipxe_src):
        shutil.rmtree(ipxe_src)
    ipxe_repo = git.Repo.clone_from(ipxe_git_uri, ipxe_src)
    # Generate patches
    tpl_loader = jinja2.FileSystemLoader(ipxe_tpl)
    env = jinja2.Environment(loader = tpl_loader)
    tpl = env.get_template('EMBED.j2')
    tpl_out = tpl.render(ipxe = ipxe)
    with open(embedscript, 'w+') as f:
        f.write(tpl_out)
    # Feature enabling
    # In config/general.h
    with open('{0}/src/config/general.h'.format(ipxe_src), 'r') as f:
        generalconf = f.read()
    # And in config/console.h
    with open('{0}/src/config/console.h'.format(ipxe_src), 'r') as f:
        consoleconf = f.read()
    patterns = ((r'^#undef(\s*NET_PROTO_IPV6.*)$', r'#define\g<1>'),  # enable IPv6
                (r'^#undef(\s*DOWNLOAD_PROTO_HTTPS)', r'#define\g<1>'),  # enable HTTPS
                (r'^//(#define\s*IMAGE_TRUST_CMD)', r'\g<1>'),  # moar HTTPS
                (r'^#undef(\s*DOWNLOAD_PROTO_FTP)', r'#define\g<1>'))  # enable FTP
    #(r'^//(#define\s*CONSOLE_CMD)', r'\g<1>'),  # BROKEN in EFI? TODO. If enabled, replace ) with , above, etc.
    #(r'^//(#define\s*IMAGE_PNG)', r'\g<1>'),  # SAME; broken in EFI? TODO.
    #console = (r'^//(#define\s*CONSOLE_VESAFB)', r'\g<1>')  # BROKEN in EFI? TODO.
    # https://stackoverflow.com/a/4427835
    # https://emilics.com/notebook/enblog/p869.html
    # The above methods don't seem to work; they crap out on the pattern matching,
    # so we use tuples instead.
    for x in patterns:
        generalconf = re.sub(x[0], x[1], generalconf, flags = re.MULTILINE)
    with open('{0}/src/config/general.h'.format(ipxe_src), 'w') as f:
        f.write(generalconf)
    # Uncomment when we want to test the console patterns above, etc.
    #for x in patterns:
    #    consoleconf = re.sub(x[0], x[1], consoleconf, flags = re.MULTILINE)
    #with open('{0}/src/config/console.h'.format(ipxe_src), 'w') as f:
    #    f.write(consoleconf)
    # Now we make!
    cwd = os.getcwd()
    os.chdir(ipxe_src + '/src')
    modenv = os.environ.copy()
    modenv['EMBED'] = embedscript
    #modenv['TRUST'] = ipxe_ssl_ca  # TODO: test these
    #modenv['CERT'] = '{0},{1}'.format(ipxe_ssl_ca, ipxe_ssl_crt)  # TODO: test these
    #modenv['PRIVKEY'] = ipxe_ssl_ckey  # TODO: test these
    build_cmd = {}
    build_cmd['base'] = ['/usr/bin/make',
                         'all',
                         'EMBED={0}'.format(embedscript)]
    # TODO: copy the UNDI stuff/chainloader to tftpboot, if enabled
    build_cmd['undi'] = ['/usr/bin/make',
                         'bin/ipxe.pxe',
                         'EMBED={0}'.format(embedscript)]
    build_cmd['efi'] = ['/usr/bin/make',
                        'bin-i386-efi/ipxe.efi',
                        'bin-x86_64-efi/ipxe.efi',
                        'EMBED={0}'.format(embedscript)]
    # Now we call the commands.
    DEVNULL = open(os.devnull, 'w')
    if os.path.isfile(build['dlpath'] + '/ipxe.log'):
        os.remove(build['dlpath'] + '/ipxe.log')
    print(('{0}: [IPXE] Building iPXE ({1}). PROGRESS: tail -f {2}/ipxe.log ...').format(
                            datetime.datetime.now(),
                            ipxe_src,
                            build['dlpath']))
    with open('{0}/ipxe.log'.format(build['dlpath']), 'a') as f:
        subprocess.call(build_cmd['base'], stdout = f, stderr = subprocess.STDOUT, env = modenv)
        subprocess.call(build_cmd['undi'], stdout = f, stderr = subprocess.STDOUT, env = modenv)
        subprocess.call(build_cmd['efi'], stdout = f, stderr = subprocess.STDOUT, env = modenv)
    print('{0}: [IPXE] Built iPXE image(s) successfully.'.format(datetime.datetime.now()))
    os.chdir(cwd)


def genISO(conf):
    build = conf['build']
    bdisk = conf['bdisk']
    ipxe = conf['ipxe']
    arch = build['arch']
    dlpath = build['dlpath']
    ver = bdisk['ver']
    isodir = build['isodir']
    isofile = '{0}-{1}-{2}.mini.iso'.format(bdisk['uxname'], bdisk['ver'], build['buildnum'])
    isopath = '{0}/{1}'.format(isodir, isofile)
    prepdir = build['prepdir']
|
|
||||||
chrootdir = build['chrootdir']
|
|
||||||
mini = ipxe['iso']
|
|
||||||
iso = {}
|
|
||||||
srcdir = build['srcdir']
|
|
||||||
ipxe_src = srcdir + '/ipxe'
|
|
||||||
mountpt = build['mountpt']
|
|
||||||
templates_dir = build['basedir'] + '/extra/templates/iPXE/'
|
|
||||||
tpl_loader = jinja2.FileSystemLoader(templates_dir)
|
|
||||||
env = jinja2.Environment(loader = tpl_loader)
|
|
||||||
bootdir = '{0}/ipxe_mini'.format(dlpath)
|
|
||||||
efiboot_img = '{0}/EFI/{1}/efiboot.img'.format(bootdir, bdisk['name'])
|
|
||||||
innerefi64 = '{0}/src/bin-x86_64-efi/ipxe.efi'.format(ipxe_src)
|
|
||||||
efi = False
|
|
||||||
# this shouldn't be necessary... if it is, we can revisit this in the future. see "Inner dir" below.
|
|
||||||
#innerefi32 = '{0}/src/bin-i386-efi/ipxe.efi'.format(ipxe_src)
|
|
||||||
# We only need to do EFI prep if we have UEFI/x86_64 support. See above, but IA64 is dead, Zed.
|
|
||||||
if mini and (('x86_64') in arch):
|
|
||||||
efi = True
|
|
||||||
# EFI prep/building
|
|
||||||
print('{0}: [IPXE] UEFI support for Mini ISO...'.format(datetime.datetime.now()))
|
|
||||||
if os.path.isdir(bootdir):
|
|
||||||
shutil.rmtree(bootdir)
|
|
||||||
os.makedirs(os.path.dirname(efiboot_img), exist_ok = True) # FAT32 embedded EFI dir
|
|
||||||
os.makedirs('{0}/EFI/boot'.format(bootdir), exist_ok = True) # EFI bootloader binary dir
|
|
||||||
# Inner dir (miniboot.img file)
|
|
||||||
#sizetotal = 2097152 # 2MB wiggle room. increase this if we add IA64.
|
|
||||||
sizetotal = 34603008 # 33MB wiggle room. increase this if we add IA64.
|
|
||||||
sizetotal += os.path.getsize(innerefi64)
|
|
||||||
sizefiles = ['HashTool', 'PreLoader']
|
|
||||||
for f in sizefiles:
|
|
||||||
sizetotal += os.path.getsize('{0}/root.x86_64/usr/share/efitools/efi/{1}.efi'.format(
|
|
||||||
chrootdir,
|
|
||||||
f))
|
|
||||||
# These won't be *quite* accurate since it's before the template substitution,
|
|
||||||
# but it'll be close enough.
|
|
||||||
for (path, dirs, files) in os.walk(templates_dir):
|
|
||||||
for file in files:
|
|
||||||
fname = os.path.join(path, file)
|
|
||||||
sizetotal += os.path.getsize(fname)
|
|
||||||
print("{0}: [IPXE] Creating EFI ESP image {1} ({2})...".format(
|
|
||||||
datetime.datetime.now(),
|
|
||||||
efiboot_img,
|
|
||||||
humanize.naturalsize(sizetotal)))
|
|
||||||
if os.path.isfile(efiboot_img):
|
|
||||||
os.remove(efiboot_img)
|
|
||||||
with open(efiboot_img, 'wb+') as f:
|
|
||||||
f.truncate(sizetotal)
|
|
||||||
DEVNULL = open(os.devnull, 'w')
|
|
||||||
cmd = ['/sbin/mkfs.fat', '-F', '32', '-n', 'iPXE_EFI', efiboot_img]
|
|
||||||
subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
|
|
||||||
cmd = ['/bin/mount', efiboot_img, mountpt]
|
|
||||||
subprocess.call(cmd)
|
|
||||||
os.makedirs(mountpt + '/EFI/boot', exist_ok = True) # "Inner" (EFI image)
|
|
||||||
#os.makedirs('{0}/EFI/{1}'.format(mountpt, bdisk['name']), exist_ok = True) # "Inner" (EFI image)
|
|
||||||
os.makedirs('{0}/boot'.format(bootdir), exist_ok = True) # kernel(s)
|
|
||||||
os.makedirs('{0}/loader/entries'.format(bootdir), exist_ok = True) # EFI
|
|
||||||
for d in (mountpt, bootdir):
|
|
||||||
shutil.copy2(innerefi64,'{0}/EFI/boot/ipxe.efi'.format(d))
|
|
||||||
for f in ('PreLoader.efi', 'HashTool.efi'):
|
|
||||||
if f == 'PreLoader.efi':
|
|
||||||
fname = 'bootx64.efi'
|
|
||||||
else:
|
|
||||||
fname = f
|
|
||||||
|
|
||||||
with open('{0}/root.x86_64/usr/share/efitools/efi/{1}'.format(
|
|
||||||
chrootdir,f),
|
|
||||||
'rb') as r:
|
|
||||||
with open('{0}/EFI/boot/{1}'.format(mountpt, fname), 'wb') as file:
|
|
||||||
file.write(r.read())
|
|
||||||
with open('{0}/root.x86_64/usr/share/efitools/efi/{1}'.format(
|
|
||||||
chrootdir, f),
|
|
||||||
'rb') as r:
|
|
||||||
with open('{0}/EFI/boot/{1}'.format(bootdir, fname), 'wb+') as file:
|
|
||||||
file.write(r.read())
|
|
||||||
# And the systemd efi bootloader.
|
|
||||||
with open('{0}/root.x86_64/usr/lib/systemd/boot/efi/systemd-bootx64.efi'.format(
|
|
||||||
chrootdir),
|
|
||||||
'rb') as r:
|
|
||||||
with open('{0}/EFI/boot/loader.efi'.format(mountpt), 'wb+') as f:
|
|
||||||
f.write(r.read())
|
|
||||||
|
|
||||||
# And loader entries.
|
|
||||||
os.makedirs('{0}/loader/entries'.format(mountpt, exist_ok = True))
|
|
||||||
for t in ('loader', 'base'):
|
|
||||||
if t == 'base':
|
|
||||||
name = bdisk['uxname']
|
|
||||||
tplpath = '{0}/loader/entries'.format(mountpt)
|
|
||||||
else:
|
|
||||||
name = t
|
|
||||||
tplpath = '{0}/loader'.format(mountpt)
|
|
||||||
tpl = env.get_template('EFI/{0}.conf.j2'.format(t))
|
|
||||||
tpl_out = tpl.render(build = build, bdisk = bdisk)
|
|
||||||
with open('{0}/{1}.conf'.format(tplpath, name), "w+") as f:
|
|
||||||
f.write(tpl_out)
|
|
||||||
cmd = ['/bin/umount', mountpt]
|
|
||||||
subprocess.call(cmd)
|
|
||||||
# Outer dir
|
|
||||||
outerdir = True
|
|
||||||
os.makedirs('{0}/isolinux'.format(bootdir), exist_ok = True) # BIOS
|
|
||||||
# Loader entries (outer)
|
|
||||||
for t in ('loader','base'):
|
|
||||||
if t == 'base':
|
|
||||||
name = bdisk['uxname']
|
|
||||||
tplpath = '{0}/loader/entries'.format(bootdir)
|
|
||||||
else:
|
|
||||||
name = t
|
|
||||||
tplpath = '{0}/loader'.format(bootdir)
|
|
||||||
tpl = env.get_template('EFI/{0}.conf.j2'.format(t))
|
|
||||||
tpl_out = tpl.render(build = build, bdisk = bdisk, outerdir = outerdir)
|
|
||||||
with open('{0}/{1}.conf'.format(tplpath, name), "w+") as f:
|
|
||||||
f.write(tpl_out)
|
|
||||||
if mini:
|
|
||||||
# BIOS prepping
|
|
||||||
shutil.copy2('{0}/src/bin/ipxe.lkrn'.format(ipxe_src), '{0}/boot/ipxe.krn'.format(bootdir))
|
|
||||||
isolinux_filelst = ['isolinux.bin',
|
|
||||||
'ldlinux.c32']
|
|
||||||
os.makedirs('{0}/isolinux'.format(bootdir), exist_ok = True)
|
|
||||||
for f in isolinux_filelst:
|
|
||||||
shutil.copy2('{0}/root.{1}/usr/lib/syslinux/bios/{2}'.format(chrootdir, arch[0], f), '{0}/isolinux/{1}'.format(bootdir, f))
|
|
||||||
tpl = env.get_template('BIOS/isolinux.cfg.j2')
|
|
||||||
tpl_out = tpl.render(build = build, bdisk = bdisk)
|
|
||||||
with open('{0}/isolinux/isolinux.cfg'.format(bootdir), "w+") as f:
|
|
||||||
f.write(tpl_out)
|
|
||||||
print("{0}: [IPXE] Building Mini ISO ({1})...".format(datetime.datetime.now(), isopath))
|
|
||||||
if efi:
|
|
||||||
cmd = ['/usr/bin/xorriso',
|
|
||||||
'-as', 'mkisofs',
|
|
||||||
'-iso-level', '3',
|
|
||||||
'-full-iso9660-filenames',
|
|
||||||
'-volid', bdisk['name'] + '_MINI',
|
|
||||||
'-appid', bdisk['desc'],
|
|
||||||
'-publisher', bdisk['dev'],
|
|
||||||
'-preparer', 'prepared by ' + bdisk['dev'],
|
|
||||||
'-eltorito-boot', 'isolinux/isolinux.bin',
|
|
||||||
'-eltorito-catalog', 'isolinux/boot.cat',
|
|
||||||
'-no-emul-boot',
|
|
||||||
'-boot-load-size', '4',
|
|
||||||
'-boot-info-table',
|
|
||||||
'-isohybrid-mbr', '{0}/root.{1}/usr/lib/syslinux/bios/isohdpfx.bin'.format(chrootdir, arch[0]),
|
|
||||||
'-eltorito-alt-boot',
|
|
||||||
'-e', 'EFI/{0}/{1}'.format(bdisk['name'], os.path.basename(efiboot_img)),
|
|
||||||
'-no-emul-boot',
|
|
||||||
'-isohybrid-gpt-basdat',
|
|
||||||
'-output', isopath,
|
|
||||||
bootdir]
|
|
||||||
else:
|
|
||||||
# UNTESTED. TODO.
|
|
||||||
# I think i want to also get rid of: -boot-load-size 4,
|
|
||||||
# -boot-info-table, ... possiblyyy -isohybrid-gpt-basedat...
|
|
||||||
# https://wiki.archlinux.org/index.php/Unified_Extensible_Firmware_Interface#Remove_UEFI_boot_support_from_Optical_Media
|
|
||||||
cmd = ['/usr/bin/xorriso',
|
|
||||||
'-as', 'mkisofs',
|
|
||||||
'-iso-level', '3',
|
|
||||||
'-full-iso9660-filenames',
|
|
||||||
'-volid', bdisk['name'] + '_MINI',
|
|
||||||
'-appid', bdisk['desc'],
|
|
||||||
'-publisher', bdisk['dev'],
|
|
||||||
'-preparer', 'prepared by ' + bdisk['dev'],
|
|
||||||
'-eltorito-boot', 'isolinux/isolinux.bin',
|
|
||||||
'-eltorito-catalog', 'isolinux/boot.cat',
|
|
||||||
'-no-emul-boot',
|
|
||||||
'-boot-load-size', '4',
|
|
||||||
'-boot-info-table',
|
|
||||||
'-isohybrid-mbr', '{0}/root.{1}/usr/lib/syslinux/bios/isohdpfx.bin'.format(chrootdir, arch[0]),
|
|
||||||
'-no-emul-boot',
|
|
||||||
'-isohybrid-gpt-basdat',
|
|
||||||
'-output', isopath,
|
|
||||||
bootdir]
|
|
||||||
DEVNULL = open(os.devnull, 'w')
|
|
||||||
subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
|
|
||||||
# Get size of ISO
|
|
||||||
iso['name'] = ['Mini']
|
|
||||||
iso['Mini'] = {}
|
|
||||||
iso['Mini']['sha'] = hashlib.sha256()
|
|
||||||
with open(isopath, 'rb') as f:
|
|
||||||
while True:
|
|
||||||
stream = f.read(65536) # 64kb chunks
|
|
||||||
if not stream:
|
|
||||||
break
|
|
||||||
iso['Mini']['sha'].update(stream)
|
|
||||||
iso['Mini']['sha'] = iso['Mini']['sha'].hexdigest()
|
|
||||||
iso['Mini']['file'] = isopath
|
|
||||||
iso['Mini']['size'] = humanize.naturalsize(os.path.getsize(isopath))
|
|
||||||
iso['Mini']['type'] = 'Mini'
|
|
||||||
iso['Mini']['fmt'] = 'Hybrid ISO'
|
|
||||||
return(iso)
|
|
||||||
|
|
||||||
def tftpbootEnv(conf):
|
|
||||||
build = conf['build']
|
|
||||||
ipxe = conf['ipxe']
|
|
||||||
sync = conf['sync']
|
|
||||||
if sync['tftp']:
|
|
||||||
pass # TODO: generate a pxelinux.cfg in bdisk/tftp.py (to write) and sync in the ipxe chainloader here
|
|
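The chunked digest loop in genISO() generalizes to any large file: reading in 64 KiB pieces keeps memory flat no matter how big the ISO gets. A standalone sketch of the same pattern (the path is hypothetical):

import hashlib

def sha256_file(path, chunksize = 65536):
    # Read in 64 KiB chunks so a multi-GB ISO never has to fit in memory.
    sha = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunksize), b''):
            sha.update(chunk)
    return(sha.hexdigest())

# e.g.: print(sha256_file('/var/tmp/bdisk/iso/example.mini.iso'))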
1 bdisk/logger.py Normal file
@@ -0,0 +1 @@
import logging
32 bdisk/main.py Normal file
@@ -0,0 +1,32 @@
#!/usr/bin/env python3

"""The primary user interface for BDisk. If we are running interactively,
parse arguments first, then initiate a BDisk session."""

import argparse
import confparse  # LOCAL


def parseArgs():
    args = argparse.ArgumentParser(description = ('An easy liveCD creator '
                                                  'built in python. Supports '
                                                  'hybrid ISOs/USB, iPXE, and '
                                                  'UEFI.'),
                                   epilog = ('https://git.square-r00t.net'))
    return(args)

def run(cfg):
    cfg = confparse.Conf(cfg, validate_cfg = True)
    cfg.parse_all()


def run_interactive():
    args = vars(parseArgs().parse_args())
    args['profile'] = {}
    for i in ('name', 'id', 'uuid'):
        args['profile'][i] = args[i]
        del(args[i])
    run(args)
    return()


if __name__ == '__main__':
    run_interactive()  # was: main(), which is never defined in this module
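Note that run_interactive() expects 'name', 'id', and 'uuid' keys which parseArgs() does not define yet; presumably that wiring lands in a later commit. A sketch of what the missing arguments might look like (argument names inferred from run_interactive(); the flags and defaults are hypothetical, not BDisk's actual CLI):

import argparse

parser = argparse.ArgumentParser(description = 'An easy liveCD creator built in python.')
# Hypothetical wiring so the profile keys run_interactive() pops actually exist.
parser.add_argument('--name', default = None, help = 'profile name')
parser.add_argument('--id', default = None, help = 'profile id')
parser.add_argument('--uuid', default = None, help = 'profile uuid')
args = vars(parser.parse_args(['--name', 'default']))
profile = {k: args.pop(k) for k in ('name', 'id', 'uuid')}
print(profile)  # {'name': 'default', 'id': None, 'uuid': None}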
396 bdisk/mtree.py Executable file
@@ -0,0 +1,396 @@
#!/usr/bin/env python3

import argparse
import copy
import datetime
import grp
import hashlib
import os
import pathlib
import platform
import pwd
import re
import stat
from collections import OrderedDict
try:
    import pycksum
    has_cksum = True
except ImportError:
    has_cksum = False

# Parse BSD mtree spec files.
# On arch, BSD mtree is ported in the AUR as nmtree.
# TODO: add a generator class as well? (in process)
# TODO: add a checking function as well?

# The format used for headers
_header_strptime_fmt = '%a %b %d %H:%M:%S %Y'

# Supported hash types (for generation). These are globally available always.
_hashtypes = ['md5', 'sha1', 'sha256', 'sha384', 'sha512']
# If RIPEMD-160 is supported, we add it (after MD5).
if 'ripemd160' in hashlib.algorithms_available:
    _hashtypes.insert(1, 'rmd160')

# Iterated over to determine which type an item is.
_stype_map = {'block': stat.S_ISBLK,
              'char': stat.S_ISCHR,
              'dir': stat.S_ISDIR,
              'fifo': stat.S_ISFIFO,
              'file': stat.S_ISREG,
              'link': stat.S_ISLNK,
              'socket': stat.S_ISSOCK}

# Regex pattern for cleaning up an octal perm mode into a string representation.
_octre = re.compile('^0o')

class MTreeGen(object):
    def __init__(self, path):
        self.path = pathlib.PosixPath(os.path.abspath(os.path.expanduser(path)))
        # These are used to keep a cached copy of the info.
        self._sysinfo = {'uids': {}, 'gids': {}}
        self._build_header()
        # We use this to keep track of where we are exactly in the tree so we can generate a full absolute path at
        # any moment relative to the tree.
        self._path_pointer = copy.deepcopy(self.path)

    def paths_iterator(self):
        for root, dirs, files in os.walk(self.path):
            for f in files:
                _fname = self.path.joinpath(f)
                _stats = self._get_stats(_fname)
                if not _stats:
                    print(('WARNING: {0} either disappeared while we were trying to parse it or '
                           'it is a broken symlink.').format(_fname))
                    continue
                # TODO: get /set line here?
                item = '    {0} \\\n'.format(f)
                _type = 'file'  # TODO: stat this more accurately
                _cksum = self._gen_cksum(_fname)
                item += '        {0} {1} {2}\\\n'.format(_stats['size'],
                                                         _stats['time'],
                                                         ('{0} '.format(_cksum) if _cksum else ''))
                # TODO: here's where the hashes would get added
            # TODO: here's where we parse dirs. maybe do that before files?
            # remember: mtree specs use ..'s to traverse upwards when done with a dir
            for d in dirs:
                _dname = self.path.joinpath(d)
                _stats = self._get_stats(_dname)
                if not _stats:
                    print(('WARNING: {0} either disappeared while we were trying to parse it or '
                           'it is a broken symlink.').format(_dname))
                    continue
                # TODO: get /set line here?
        return()

    def _gen_cksum(self, fpath):
        if not has_cksum:
            return(None)
        if not os.path.isfile(fpath):
            return(None)
        # TODO: waiting on https://github.com/sobotklp/pycksum/issues/2 for byte iteration (because large files maybe?)
        c = pycksum.Cksum()
        with open(fpath, 'rb') as f:
            c.add(f)
        return(c.get_cksum())

    def _get_stats(self, path):
        stats = {}
        try:
            _st = os.stat(path, follow_symlinks = False)
        except FileNotFoundError:
            # Broken symlink? Shouldn't occur since follow_symlinks is False anyways, BUT...
            return(None)
        # Ownership
        stats['uid'] = _st.st_uid
        stats['gid'] = _st.st_gid
        if _st.st_uid in self._sysinfo['uids']:
            stats['uname'] = self._sysinfo['uids'][_st.st_uid]
        else:
            _pw = pwd.getpwuid(_st.st_uid).pw_name
            stats['uname'] = _pw
            self._sysinfo['uids'][_st.st_uid] = _pw  # was: _st.stuid (AttributeError)
        if _st.st_gid in self._sysinfo['gids']:
            stats['gname'] = self._sysinfo['gids'][_st.st_gid]
        else:
            _grp = grp.getgrgid(_st.st_gid).gr_name
            stats['gname'] = _grp
            self._sysinfo['gids'][_st.st_gid] = _grp  # was: _st.stgid (AttributeError)
        # Type and Mode
        for t in _stype_map:
            if _stype_map[t](_st.st_mode):
                stats['type'] = t
                # TODO: need a reliable way of parsing this.
                # for instance, for /dev/autofs, _st.st_dev = 6 (os.makedev(6) confirms major is 0, minor is 6)
                # but netBSD mtree (ported) says it's "0xaeb" (2795? or, as str, "®b" apparently).
                # I'm guessing the kernel determines this, but where is it pulling it from/how?
                # We can probably do 'format,major,minor' (or, for above, 'linux,0,6').
                # if t in ('block', 'char'):
                #     stats['device'] = None
                # Handle symlinks.
                if t == 'link':
                    _target = path
                    while os.path.islink(_target):
                        _target = os.path.realpath(_target)
                    stats['link'] = _target
                break
        stats['mode'] = '{0:0>4}'.format(_octre.sub('', str(oct(stat.S_IMODE(_st.st_mode)))))
        stats['size'] = _st.st_size
        stats['time'] = str(float(_st.st_mtime))
        stats['nlink'] = _st.st_nlink
        # TODO: "flags" keyword? is that meaningful on linux?
        stats['flags'] = 'none'
        return(stats)

    def _gen_hashes(self, fpath):
        hashes = OrderedDict({})
        if not os.path.isfile(fpath):
            return(hashes)
        _hashnums = len(_hashtypes)
        for idx, h in enumerate(_hashtypes):
            # Stupid naming inconsistencies.
            _hashname = (h if h != 'rmd160' else 'ripemd160')  # was: "is not", which compares identity, not equality
            _hasher = hashlib.new(_hashname)
            with open(fpath, 'rb') as f:
                # Hash 64kb at a time in case it's a huge file. TODO: is this the most ideal chunk size?
                _hashbuf = f.read(64000)
                while len(_hashbuf) > 0:
                    _hasher.update(_hashbuf)
                    _hashbuf = f.read(64000)
            hashes[h] = _hasher.hexdigest()
        return(hashes)
        # if idx + 1 < _hashnums:
        #     hashes += '        {0}={1} \\\n'.format(h, _hasher.hexdigest())
        # else:
        #     hashes += '        {0}={1}\n'.format(h, _hasher.hexdigest())
        # return(hashes)

    def _build_header(self):
        self.spec = ''
        _header = OrderedDict({})
        _header['user'] = pwd.getpwuid(os.geteuid()).pw_name
        _header['machine'] = platform.node()
        _header['tree'] = str(self.path)
        _header['date'] = datetime.datetime.utcnow().strftime(_header_strptime_fmt)
        for h in _header:
            self.spec += '#\t{0:>7}: {1}\n'.format(h, _header[h])
        self.spec += '\n'
        return()


class MTreeParse(object):
    def __init__(self, spec):
        if not isinstance(spec, (str, bytes)):
            raise ValueError('spec must be a raw string of the spec or a bytes object of the string')
        if isinstance(spec, bytes):
            try:
                spec = spec.decode('utf-8')
            except UnicodeDecodeError:
                raise ValueError('spec must be a utf-8 encoded set of bytes if using byte mode')
        self.orig_spec = copy.deepcopy(spec)  # For referencing in case someone wanted to write it out.
        # We NOW need to handle the escaped linebreaking it does.
        self._specdata = re.sub('\\\\\s+', '', spec).splitlines()
        self._get_header()
        self.spec = {'header': self.header,
                     'paths': {}}
        # Template for an item.
        # Default keywords are:
        # flags, gid, link, mode, nlink, size, time, type, uid
        self._tplitem = {
            'type': None,  # ('block', 'char', 'dir', 'fifo', 'file', 'link', 'socket')
            # checksum of file (if it's a file) (int)
            # On all *nix platforms, the cksum(1) utility (which is what the mtree spec uses) follows
            # the POSIX standard CRC (which is NOT CRC-1/CRC-16 nor CRC32!):
            # http://pubs.opengroup.org/onlinepubs/009695299/utilities/cksum.html
            # For a python implementation,
            # https://stackoverflow.com/questions/6835381/python-equivalent-of-unix-cksum-function
            # See also crcmod (in PyPi).
            'cksum': None,
            # "The device number to use for block or char file types." Should be converted to a tuple of one
            # of the following:
            # - (format(str), major(int), minor(int))
            # - (format(str), major(int), unit(str?), subunit(str?)) (only used on bsdos formats)
            # - (number(int?), ) ("opaque" number)
            # Valid formats are, per man page of mtree:
            # native, 386bsd, 4bsd, bsdos, freebsd, hpux, isc, linux, netbsd, osf1, sco, solaris, sunos,
            # svr3, svr4, ultrix
            'device': None,
            # File flags as symbolic name. BSD-specific thing? TODO: testing on BSD system
            'flags': [],
            'ignore': False,  # An mtree-internal flag to ignore hierarchy under this item
            'gid': None,  # The group ID (int)
            'gname': None,  # The group name (str)
            'link': None,  # The link target/source, if a link.
            # The MD5 checksum digest (str? hex?). "md5digest" is a synonym for this, so it's consolidated in
            # as the same keyword.
            'md5': None,
            # The mode (in octal) (we convert it to a python-native int for os.chmod/stat, etc.)
            # May also be a symbolic value; TODO: map symbolic to octal/int.
            'mode': None,
            'nlink': None,  # Number of hard links for this item.
            'optional': False,  # This item may or may not be present in the compared directory for checking.
            'rmd160': None,  # The RMD-160 checksum of the file. "rmd160digest" is a synonym.
            'sha1': None,  # The SHA-1 sum. "sha1digest" is a synonym.
            'sha256': None,  # SHA-2 256-bit checksum; "sha256digest" is a synonym.
            'sha384': None,  # SHA-2 384-bit checksum; "sha384digest" is a synonym.
            'sha512': None,  # SHA-2 512-bit checksum; "sha512digest" is a synonym.
            'size': None,  # Size of the file in bytes (int).
            'tags': [],  # mtree-internal tags (comma-separated in the mtree spec).
            'time': None,  # Time the file was last modified (in Epoch fmt as float).
            'uid': None,  # File owner UID (int)
            'uname': None  # File owner username (str)
            # And lastly, "children" is where the children files/directories go. We don't include it in the template;
            # it's added programmatically.
            # 'children': {}
        }
        # Global aspects are handled by "/set" directives.
        # They are restored by an "/unset". Since they're global and stateful, they're handled as a class attribute.
        self.settings = copy.deepcopy(self._tplitem)
        self._parse_items()
        del(self.settings, self._tplitem)

    def _get_header(self):
        self.header = {}
        _headre = re.compile('^#\s+(user|machine|tree|date):\s')
        _cmtre = re.compile('^\s*#\s*')
        _blklnre = re.compile('^\s*$')
        for idx, line in enumerate(self._specdata):
            if _headre.search(line):  # We found a header item.
                l = [i.lstrip() for i in _cmtre.sub('', line).split(':', 1)]
                header = l[0]
                val = (l[1] if l[1] != '(null)' else None)  # was: "is not" (identity comparison)
                if header == 'date':
                    val = datetime.datetime.strptime(val, _header_strptime_fmt)
                elif header == 'tree':
                    val = pathlib.PosixPath(val)
                self.header[header] = val
            elif _blklnre.search(line):
                break  # We've reached the end of the header. Otherwise...
            else:  # We definitely shouldn't be here, but this means the spec doesn't even have a header.
                break
        return()

    def _parse_items(self):
        # A pattern (compiled for performance) to match commands.
        _stngsre = re.compile('^/(un)?set\s')
        # Per the man page:
        # "Empty lines and lines whose first non-whitespace character is a hash mark ('#') are ignored."
        _ignre = re.compile('^(\s*(#.*)?)?$')
        # The following regex is used to quickly and efficiently check for a synonymized hash name.
        _hashre = re.compile('^(md5|rmd160|sha1|sha256|sha384|sha512)(digest)?$')
        # The following regex is to test if we need to traverse upwards in the path.
        _parentre = re.compile('^\.{,2}/?$')
        # _curpath = self.header['tree']
        _curpath = pathlib.PosixPath('/')
        _types = ('block', 'char', 'dir', 'fifo', 'file', 'link', 'socket')
        # This parses keywords. Used by both item specs and /set.
        def _kwparse(kwline):
            out = {}
            for i in kwline:
                l = i.split('=', 1)
                if len(l) < 2:
                    l.append(None)
                k, v = l
                if v == 'none':
                    v = None
                # These are represented as octals.
                if k in ('mode', ):
                    # TODO: handle symbolic references too (e.g. rwxrwxrwx)
                    if v.isdigit():
                        v = int(v, 8)  # Convert from the octal. This can then be used directly with os.chmod etc.
                # These are represented as ints
                elif k in ('uid', 'gid', 'cksum', 'nlink'):
                    if v.isdigit():
                        v = int(v)
                # These are booleans (represented as True by their presence).
                elif k in ('ignore', 'optional'):
                    v = True
                # These are lists (comma-separated).
                elif k in ('flags', 'tags'):
                    if v:
                        v = [i.strip() for i in v.split(',')]
                # The following are synonyms.
                elif _hashre.search(k):
                    k = _hashre.sub('\g<1>', k)
                elif k == 'time':
                    v = datetime.datetime.fromtimestamp(float(v))
                elif k == 'type':
                    if v not in _types:
                        raise ValueError('{0} not one of: {1}'.format(v, ', '.join(_types)))
                out[k] = v
            return(out)
        def _unset_parse(unsetline):
            out = {}
            if unsetline[1] == 'all':
                return(copy.deepcopy(self._tplitem))
            for i in unsetline[1:]:  # was: unsetline, which would try to look up '/unset' itself
                out[i] = self._tplitem[i]
            return(out)
        # The Business-End (TM)
        for idx, line in enumerate(self._specdata):
            _fname = copy.deepcopy(_curpath)
            # Skip these lines
            if _ignre.search(line):
                continue
            l = line.split()
            if _parentre.search(line):
                _curpath = _curpath.parent
            elif not _stngsre.search(line):
                # So it's an item, not a command.
                _itemsettings = copy.deepcopy(self.settings)
                _itemsettings.update(_kwparse(l[1:]))
                if _itemsettings['type'] == 'dir':
                    # SOMEONE PLEASE let me know if there's a cleaner way to do this.
                    _curpath = pathlib.PosixPath(os.path.normpath(_curpath.joinpath(l[0])))
                    _fname = _curpath
                else:
                    _fname = pathlib.PosixPath(os.path.normpath(_curpath.joinpath(l[0])))
                self.spec['paths'][_fname] = _itemsettings
            else:
                # It's a command. We can safely split on whitespace since the man page specifies the
                # values are not to contain whitespace.
                # /set
                if l[0] == '/set':
                    del(l[0])
                    self.settings.update(_kwparse(l))
                # /unset
                else:
                    self.settings.update(_unset_parse(l))
                continue
        return()


def parseArgs():
    args = argparse.ArgumentParser(description = 'An mtree parser')
    # TODO: support stdin piping
    args.add_argument('specfile',
                      help = 'The path to the spec file to parse')
    return(args)


# Allow to be run as a CLI utility as well.
def main():
    args = vars(parseArgs().parse_args())
    import os
    with open(os.path.abspath(os.path.expanduser(args['specfile']))) as f:
        mt = MTreeParse(f.read())
    with open('/tmp/newspec', 'w') as f:
        f.write('\n'.join(mt._specdata))
    import pprint
    import inspect
    del(mt.orig_spec)
    del(mt._specdata)
    import shutil
    pprint.pprint(inspect.getmembers(mt), width = shutil.get_terminal_size()[0])

if __name__ == '__main__':
    main()
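A quick sketch of how MTreeParse is meant to be consumed, with a tiny hypothetical spec (the header and keyword syntax follow the mtree format as handled above; the actual paths and values are made up):

spec_text = ('#\t   user: root\n'
             '#\tmachine: buildbox\n'
             '#\t   tree: /tmp/tree\n'
             '#\t   date: Mon Jan 1 00:00:00 2018\n'
             '\n'
             '/set type=file uid=0 gid=0 mode=0644\n'
             '.        type=dir\n'
             'README   size=12\n'
             '..\n')

mt = MTreeParse(spec_text)
print(mt.spec['header']['user'])  # 'root'
for path, item in mt.spec['paths'].items():
    print(path, item['type'], item['mode'])  # mode is already an int from the octal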
375 bdisk/prep.py
@@ -1,375 +0,0 @@
import os
import shutil
import re
import hashlib
import tarfile
import subprocess
import jinja2
import datetime
import humanize
from urllib.request import urlopen
import host  # bdisk.host
import bGPG  # bdisk.bGPG


def dirChk(conf):
    # Make dirs if they don't exist
    for d in ('archboot', 'isodir', 'mountpt', 'srcdir', 'prepdir'):
        os.makedirs(conf['build'][d], exist_ok = True)
    # Make dirs for sync staging if we need to
    for x in ('http', 'tftp'):
        if conf['sync'][x]:
            os.makedirs(conf[x]['path'], exist_ok = True)

def downloadTarball(conf):
    build = conf['build']
    dlpath = build['dlpath']
    src = conf['src']
    arch = build['arch']
    tarball_path = {}
    for a in arch:
        locsrc = conf['source_' + a]
        mirror = locsrc['mirrorproto'] + '://' + locsrc['mirror']
        rlsdir = mirror + locsrc['mirrorpath']
        if locsrc['mirrorchksum'] != '':
            if locsrc['chksumtype'] == '':
                exit("{0}: source_{1}:chksumtype is unset!".format(datetime.datetime.now(), a))
            hash_type = locsrc['chksumtype']
            hash_in = urlopen(mirror + locsrc['mirrorchksum'])
            hashsums = hash_in.read()
            hash_in.close()
            hash_raw = hashsums.decode("utf-8")
            hash_list = list(filter(None, hash_raw.split('\n')))
            hash_dict = {x.split()[1]: x.split()[0] for x in hash_list}
        # returns path/filename e.g. /some/path/to/file.tar.gz
        # we use .gnupg since we'll need it later.
        os.makedirs(dlpath + '/.gnupg', exist_ok = True)
        tarball_path[a] = dlpath + '/.latest.' + a + '.tar'
        pattern = re.compile('^.*' + a + '\.tar(\.(gz|bz2|xz))?$')
        if locsrc['mirrorfile'] != '':
            tarball = locsrc['mirrorfile']
        else:
            tarball = [filename.group(0) for l in list(hash_dict.keys()) for filename in [pattern.search(l)] if filename][0]
        if locsrc['mirrorchksum'] != '':
            hashsum = hash_dict[tarball]
        if os.path.isfile(tarball_path[a]):
            pass
        else:
            # fetch the tarball...
            print("{0}: [PREP] Fetching tarball ({1} architecture)...".format(
                datetime.datetime.now(),
                a))
            tarball_dl = urlopen(rlsdir + tarball)
            with open(tarball_path[a], 'wb') as f:
                f.write(tarball_dl.read())
            tarball_dl.close()
            print("{0}: [PREP] Done fetching {1} ({2}).".format(
                datetime.datetime.now(),
                tarball_path[a],
                humanize.naturalsize(
                    os.path.getsize(tarball_path[a]))))
        if locsrc['mirrorchksum'] != '':
            print("{0}: [PREP] Checking hash checksum {1} against {2}...".format(
                datetime.datetime.now(),
                hashsum,
                tarball_path[a]))
            # Calculate the checksum according to type specified.
            tarball_hash = False
            for i in hashlib.algorithms_available:
                if hash_type == i:
                    hashfunc = getattr(hashlib, i)
                    tarball_hash = hashfunc(open(tarball_path[a], 'rb').read()).hexdigest()
                    break
            if not tarball_hash:
                exit("{0}: source_{1}:chksumtype '{2}' is not supported on this machine!".format(
                    datetime.datetime.now(),
                    a,
                    hash_type))
            if tarball_hash != hashsum:
                exit(("{0}: {1} either did not download correctly\n\t\t\t or a wrong (probably old) version exists on the filesystem.\n\t\t\t " +
                      "Please delete it and try again.").format(datetime.datetime.now(), tarball))
        if locsrc['mirrorgpgsig'] != '':
            # let's verify the signature.
            if locsrc['mirrorgpgsig'] == '.sig':
                gpgsig_remote = rlsdir + tarball + '.sig'
            else:
                gpgsig_remote = locsrc['mirrorgpgsig']
            sig_dl = urlopen(gpgsig_remote)
            sig = tarball_path[a] + '.sig'
            with open(sig, 'wb+') as f:
                f.write(sig_dl.read())
            sig_dl.close()
            gpg_verify = bGPG.gpgVerify(sig, tarball_path[a], conf)
            if not gpg_verify:
                exit("{0}: There was a failure checking {1} against {2}. Please investigate.".format(
                    datetime.datetime.now(),
                    sig,
                    tarball_path[a]))
    return(tarball_path)

def unpackTarball(tarball_path, build, keep = False):
    chrootdir = build['chrootdir']
    if os.path.isdir(chrootdir):
        if not keep:
            # Make the dir if it doesn't exist
            shutil.rmtree(chrootdir, ignore_errors = True)
            os.makedirs(chrootdir, exist_ok = True)
    else:
        os.makedirs(chrootdir, exist_ok = True)
    # Open and extract the tarball
    if not keep:
        for a in build['arch']:
            print("{0}: [PREP] Extracting tarball {1} ({2})...".format(
                datetime.datetime.now(),
                tarball_path[a],
                humanize.naturalsize(
                    os.path.getsize(tarball_path[a]))))
            tar = tarfile.open(tarball_path[a], 'r:gz')
            tar.extractall(path = chrootdir)
            tar.close()
            print("{0}: [PREP] Extraction for {1} finished.".format(datetime.datetime.now(), tarball_path[a]))

def buildChroot(conf, keep = False):
    build = conf['build']
    bdisk = conf['bdisk']
    user = conf['user']
    dlpath = build['dlpath']
    chrootdir = build['chrootdir']
    arch = build['arch']
    extradir = build['basedir'] + '/extra'
    unpack_me = unpackTarball(downloadTarball(conf), build, keep)
    # build dict of lists of files and dirs from pre-build.d dir, do the same with arch-specific changes.
    # (a toy illustration of this partitioning idea follows the end of this file.)
    prebuild_overlay = {}
    prebuild_arch_overlay = {}
    for x in arch:
        prebuild_arch_overlay[x] = {}
        for y in ['files', 'dirs']:
            prebuild_overlay[y] = []
            prebuild_arch_overlay[x][y] = []
    for path, dirs, files in os.walk('{0}/pre-build.d/'.format(extradir)):
        prebuild_overlay['dirs'].append('{0}/'.format(path))
        for file in files:
            prebuild_overlay['files'].append(os.path.join(path, file))
    for x in prebuild_overlay.keys():
        prebuild_overlay[x][:] = [re.sub('^{0}/pre-build.d/'.format(extradir), '', s) for s in prebuild_overlay[x]]
        prebuild_overlay[x] = list(filter(None, prebuild_overlay[x]))
        for y in prebuild_arch_overlay.keys():
            prebuild_arch_overlay[y][x][:] = [i for i in prebuild_overlay[x] if i.startswith(y)]
            prebuild_arch_overlay[y][x][:] = [re.sub('^{0}/'.format(y), '', s) for s in prebuild_arch_overlay[y][x]]
            prebuild_arch_overlay[y][x] = list(filter(None, prebuild_arch_overlay[y][x]))
        prebuild_overlay[x][:] = [y for y in prebuild_overlay[x] if not y.startswith(('x86_64', 'i686'))]
    prebuild_overlay['dirs'].remove('/')
    # create the dir structure. these should almost definitely be owned by root.
    for a in arch:
        for dir in prebuild_overlay['dirs']:
            os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
        # and copy over the files. again, chown to root.
        for file in prebuild_overlay['files']:
            shutil.copy2('{0}/pre-build.d/{1}'.format(extradir, file),
                         '{0}/root.{1}/{2}'.format(chrootdir, a, file), follow_symlinks = False)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
        # do the same for arch-specific stuff.
        for dir in prebuild_arch_overlay[a]['dirs']:
            os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
        for file in prebuild_arch_overlay[a]['files']:
            shutil.copy2('{0}/pre-build.d/{1}/{2}'.format(extradir, a, file),
                         '{0}/root.{1}/{2}'.format(chrootdir, a, file), follow_symlinks = False)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)

def prepChroot(conf):
    build = conf['build']
    bdisk = conf['bdisk']
    user = conf['user']
    chrootdir = build['chrootdir']
    prepdir = build['prepdir']
    arch = build['arch']
    bdisk_repo_dir = build['basedir']
    dlpath = build['dlpath']
    templates_dir = bdisk_repo_dir + '/extra/templates'
    #build = {} # why was this here?
    ## let's prep some variables to write out the version info.txt
    # and these should be passed in from the args, for the most part.
    build['name'] = bdisk['name']
    build['time'] = datetime.datetime.utcnow().strftime("%a %b %d %H:%M:%S UTC %Y")
    hostname = host.getHostname()  # was: host.getHostname (assigned the function, never called)
    build['user'] = os.environ['USER']
    if 'SUDO_USER' in os.environ:
        build['realuser'] = os.environ['SUDO_USER']
    build['buildnum'] += 1
    with open(dlpath + '/buildnum', 'w+') as f:
        f.write(str(build['buildnum']) + "\n")
    # and now that we have that dict, let's write out the VERSION_INFO.txt file.
    loader = jinja2.FileSystemLoader(templates_dir)
    env = jinja2.Environment(loader = loader)
    tpl = env.get_template('VERSION_INFO.txt.j2')
    tpl_out = tpl.render(build = build, bdisk = bdisk, hostname = hostname, distro = host.getOS())
    for a in arch:
        # Copy the GPG pubkey
        shutil.copy2('{0}/gpgkey.pub'.format(dlpath), '{0}/root.{1}/root/pubkey.gpg'.format(chrootdir, a))
        # Write the VERSION_INFO.txt from template
        with open('{0}/root.{1}/root/VERSION_INFO.txt'.format(chrootdir, a), 'w+') as f:
            f.write(tpl_out)
    with open('{0}/VERSION_INFO.txt'.format(prepdir), 'w+') as f:
        f.write(tpl_out)
    # And perform the templating overlays
    templates_overlay = {}
    templates_arch_overlay = {}
    for x in arch:
        templates_arch_overlay[x] = {}
        for y in ['files', 'dirs']:
            templates_overlay[y] = []
            templates_arch_overlay[x][y] = []
    for path, dirs, files in os.walk('{0}/pre-build.d'.format(templates_dir)):
        for dir in dirs:
            templates_overlay['dirs'].append('{0}/'.format(dir))
        for file in files:
            templates_overlay['files'].append(os.path.join(path, file))
    for x in templates_overlay.keys():
        templates_overlay[x][:] = [re.sub('^{0}/pre-build.d/(.*)(\.j2)'.format(templates_dir), '\g<1>', s) for s in templates_overlay[x]]
        templates_overlay[x] = list(filter(None, templates_overlay[x]))
        for y in templates_arch_overlay.keys():
            templates_arch_overlay[y][x][:] = [i for i in templates_overlay[x] if i.startswith(y)]
            templates_arch_overlay[y][x][:] = [re.sub('^{0}/(.*)(\.j2)'.format(y), '\g<1>', s) for s in templates_arch_overlay[y][x]]
            templates_arch_overlay[y][x][:] = [re.sub('^{0}/'.format(y), '', s) for s in templates_arch_overlay[y][x]]
            templates_arch_overlay[y][x] = list(filter(None, templates_arch_overlay[y][x]))
        templates_overlay[x][:] = [y for y in templates_overlay[x] if not y.startswith(('x86_64', 'i686'))]
    if '/' in templates_overlay['dirs']:
        templates_overlay['dirs'].remove('/')
    # create the dir structure. these should almost definitely be owned by root.
    if build['gpg']:
        gpg = conf['gpgobj']
        if conf['gpg']['mygpgkey']:
            signkey = conf['gpg']['mygpgkey']
        else:
            signkey = str(gpg.signers[0].subkeys[0].fpr)
    for a in arch:
        for dir in templates_overlay['dirs']:
            os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
        # and write the files. again, chown to root.
        for file in templates_overlay['files']:
            tplname = 'pre-build.d/{0}.j2'.format(file)
            tpl = env.get_template(tplname)
            tpl_out = tpl.render(build = build, bdisk = bdisk, mygpgkey = signkey, user = user)
            with open('{0}/root.{1}/{2}'.format(chrootdir, a, file), 'w') as f:
                f.write(tpl_out)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
        # do the same for arch-specific stuff.
        for dir in templates_arch_overlay[a]['dirs']:
            os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
        for file in templates_arch_overlay[a]['files']:
            tplname = 'pre-build.d/{0}/{1}.j2'.format(a, file)
            tpl = env.get_template('{0}'.format(tplname))
            tpl_out = tpl.render(build = build, bdisk = bdisk, mygpgkey = signkey)
            with open('{0}/root.{1}/{2}'.format(chrootdir, a, file), 'w') as f:
                f.write(tpl_out)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
    return(build)

def postChroot(conf):
    build = conf['build']
    bdisk = conf['bdisk']
    dlpath = build['dlpath']
    chrootdir = build['chrootdir']
    arch = build['arch']
    overdir = build['basedir'] + '/overlay/'
    templates_dir = '{0}/extra/templates'.format(build['basedir'])
    loader = jinja2.FileSystemLoader(templates_dir)
    env = jinja2.Environment(loader = loader)
    postbuild_overlay = {}
    postbuild_arch_overlay = {}
    for x in arch:
        os.remove('{0}/root.{1}/README'.format(chrootdir, x))
        postbuild_arch_overlay[x] = {}
        for y in ['files', 'dirs']:
            postbuild_overlay[y] = []
            postbuild_arch_overlay[x][y] = []
    for path, dirs, files in os.walk(overdir):
        postbuild_overlay['dirs'].append('{0}/'.format(path))
        for file in files:
            postbuild_overlay['files'].append(os.path.join(path, file))
    for x in postbuild_overlay.keys():
        postbuild_overlay[x][:] = [re.sub('^' + overdir, '', s) for s in postbuild_overlay[x]]
        postbuild_overlay[x] = list(filter(None, postbuild_overlay[x]))
        for y in postbuild_arch_overlay.keys():
            postbuild_arch_overlay[y][x][:] = [i for i in postbuild_overlay[x] if i.startswith(y)]
            postbuild_arch_overlay[y][x][:] = [re.sub('^' + y + '/', '', s) for s in postbuild_arch_overlay[y][x]]
            postbuild_arch_overlay[y][x] = list(filter(None, postbuild_arch_overlay[y][x]))
        postbuild_overlay[x][:] = [y for y in postbuild_overlay[x] if not y.startswith(('x86_64', 'i686'))]
    postbuild_overlay['dirs'].remove('/')
    # create the dir structure. these should almost definitely be owned by root.
    for a in arch:
        for dir in postbuild_overlay['dirs']:
            os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0, follow_symlinks = False)
        # and copy over the files. again, chown to root.
        for file in postbuild_overlay['files']:
            shutil.copy2(overdir + file, '{0}/root.{1}/{2}'.format(chrootdir, a, file), follow_symlinks = False)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
        # do the same for arch-specific stuff.
        for dir in postbuild_arch_overlay[a]['dirs']:
            os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0, follow_symlinks = False)
        for file in postbuild_arch_overlay[a]['files']:
            shutil.copy2('{0}{1}/{2}'.format(overdir, a, file),
                         '{0}/root.{1}/{2}'.format(chrootdir, a, file),
                         follow_symlinks = False)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
    # And perform the templating overlays
    templates_overlay = {}
    templates_arch_overlay = {}
    for x in arch:
        templates_arch_overlay[x] = {}
        for y in ['files', 'dirs']:
            templates_overlay[y] = []
            templates_arch_overlay[x][y] = []
    for path, dirs, files in os.walk('{0}/overlay'.format(templates_dir)):
        for dir in dirs:
            templates_overlay['dirs'].append('{0}/'.format(dir))
        for file in files:
            templates_overlay['files'].append(os.path.join(path, file))
    for x in templates_overlay.keys():
        templates_overlay[x][:] = [re.sub('^{0}/overlay/(.*)(\.j2)'.format(templates_dir), '\g<1>', s) for s in templates_overlay[x]]
        templates_overlay[x] = list(filter(None, templates_overlay[x]))
        for y in templates_arch_overlay.keys():
            templates_arch_overlay[y][x][:] = [i for i in templates_overlay[x] if i.startswith(y)]
            templates_arch_overlay[y][x][:] = [re.sub('^{0}/(.*)(\.j2)'.format(y), '\g<1>', s) for s in templates_arch_overlay[y][x]]
            templates_arch_overlay[y][x][:] = [re.sub('^{0}/'.format(y), '', s) for s in templates_arch_overlay[y][x]]
            templates_arch_overlay[y][x] = list(filter(None, templates_arch_overlay[y][x]))
        templates_overlay[x][:] = [y for y in templates_overlay[x] if not y.startswith(('x86_64', 'i686'))]
    if '/' in templates_overlay['dirs']:
        templates_overlay['dirs'].remove('/')
    # create the dir structure. these should almost definitely be owned by root.
    if build['gpg']:
        gpg = conf['gpgobj']
        if conf['gpg']['mygpgkey']:
            signkey = conf['gpg']['mygpgkey']
        else:
            signkey = str(gpg.signers[0].subkeys[0].fpr)
    for a in arch:
        for dir in templates_overlay['dirs']:
            os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
        # and write the files. again, chown to root.
        for file in templates_overlay['files']:
            tplname = 'overlay/{0}.j2'.format(file)
            tpl = env.get_template(tplname)
            tpl_out = tpl.render(build = build, bdisk = bdisk, mygpgkey = signkey)
            with open('{0}/root.{1}/{2}'.format(chrootdir, a, file), 'w') as f:
                f.write(tpl_out)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
        # do the same for arch-specific stuff.
        for dir in templates_arch_overlay[a]['dirs']:
            os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
        for file in templates_arch_overlay[a]['files']:
            tplname = 'overlay/{0}/{1}.j2'.format(a, file)
            tpl = env.get_template(tplname)
            tpl_out = tpl.render(build = build, bdisk = bdisk, mygpgkey = signkey)
            with open('{0}/root.{1}/{2}'.format(chrootdir, a, file), 'w') as f:
                f.write(tpl_out)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
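The overlay bookkeeping in buildChroot()/prepChroot()/postChroot() above is dense; the core idea is stripping a common prefix and then partitioning paths into arch-specific and common sets. A toy illustration of the same idea (paths hypothetical):

import re

paths = ['etc/motd', 'x86_64/etc/pacman.conf', 'i686/etc/pacman.conf']
arches = ('x86_64', 'i686')

# Partition into per-arch lists (with the arch prefix stripped) and a common list.
per_arch = {a: [re.sub('^{0}/'.format(a), '', p) for p in paths if p.startswith(a)]
            for a in arches}
common = [p for p in paths if not p.startswith(arches)]
print(per_arch)  # {'x86_64': ['etc/pacman.conf'], 'i686': ['etc/pacman.conf']}
print(common)    # ['etc/motd']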
129
bdisk/prompt_strings.py
Normal file
129
bdisk/prompt_strings.py
Normal file
@ -0,0 +1,129 @@
|
|||||||
|
# These are *key* ciphers, for encrypting exported keys.
|
||||||
|
openssl_ciphers = ['aes128', 'aes192', 'aes256', 'bf', 'blowfish',
|
||||||
|
'camellia128', 'camellia192', 'camellia256', 'cast', 'des',
|
||||||
|
'des3', 'idea', 'rc2', 'seed']
|
||||||
|
# These are *hash algorithms* for cert digests.
|
||||||
|
openssl_digests = ['blake2b512', 'blake2s256', 'gost', 'md4', 'md5', 'mdc2',
|
||||||
|
'rmd160', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']
|
||||||
|
|
||||||
|
class PromptStrings(object):
|
||||||
|
gpg = {
|
||||||
|
'attribs': {
|
||||||
|
'algo': {
|
||||||
|
'text': 'the subkey\'s encryption type/algorithm',
|
||||||
|
# The following can ONLY be used for encryption, not signing: elg, cv
|
||||||
|
#'choices': ['rsa', 'dsa', 'elg', 'ed', 'cv', 'nistp', 'brainpool.1', 'secp.k1'],
|
||||||
|
'choices': ['rsa', 'dsa', 'ed', 'nist', 'brainpool.1', 'sec.k1'],
|
||||||
|
#'default': 'rsa'
|
||||||
|
'default': 'ed'
|
||||||
|
},
|
||||||
|
'keysize': {
|
||||||
|
'text': 'the subkey\'s key size (in bits)',
|
||||||
|
'choices': {
|
||||||
|
'rsa': ['1024', '2048', '4096'],
|
||||||
|
'dsa': ['768', '2048', '3072'],
|
||||||
|
#'elg': ['1024', '2048', '4096'], # Invalid for signing, etc.
|
||||||
|
'ed': ['25519'],
|
||||||
|
#'cv': ['25519'],
|
||||||
|
                'nistp': ['256', '384', '521'],
                'brainpool.1': ['256', '384', '512'],
                'sec.k1': ['256']
            },
            'default': {
                'rsa': '4096',
                'dsa': '3072',
                'ed': '25519',
                'nistp': '521',
                'brainpool.1': '512',
                'sec.k1': '256'
            }
        }
    },
    'params': ['name', 'email', 'comment']
}
ssl = {
    'attribs': {
        'cert': {
            'hash_algo': {
                'text': ('What hashing algorithm do you want to use? '
                         '(Default is sha512.)'),
                'prompt': 'Hashing algorithm: ',
                'options': openssl_digests,
                'default': 'sha512'
            }
        },
        'key': {
            'cipher': {
                'text': ('What encryption algorithm/cipher do you want to '
                         'use? (Default is aes256.) Use "none" to specify '
                         'a key without a passphrase.'),
                'prompt': 'Cipher: ',
                'options': openssl_ciphers + ['none'],
                'default': 'aes256'
            },
            'keysize': {
                'text': ('What keysize/length (in bits) do you want the '
                         'key to be? (Default is 4096; much higher values '
                         'are possible but are untested and thus not '
                         'supported by this tool; feel free to edit the '
                         'generated configuration by hand.) (If the key '
                         'cipher is "none", this is ignored.)'),
                'prompt': 'Keysize: ',
                # TODO: do all openssl_ciphers support these sizes?
                'options': ['1024', '2048', '4096'],
                'default': '4096'
            },
            'passphrase': {
                'text': ('What passphrase do you want to use for the key? '
                         'If you specified the cipher as "none", this is '
                         'ignored (you can just hit enter).'),
                'prompt': 'Passphrase (will not echo back): ',
                'options': None,
                'default': ''
            }
        }
    },
    'paths': {
        'cert': '(or read from) the certificate',
        'key': '(or read from) the key',
        'csr': ('(or read from) the certificate signing request (if '
                'blank, we won\'t write to disk - the operation will '
                'occur entirely in memory assuming we need to generate/'
                'sign)')
    },
    'paths_ca': {
        'index': ('(or read from) the CA (Certificate Authority) Database '
                  'index file (if left blank, one will not be used)'),
        'serial': ('(or read from) the CA (Certificate Authority) '
                   'Database serial file (if left blank, one will not be '
                   'used)'),
    },
    'subject': {
        'countryName': {
            'text': ('the 2-letter country abbreviation (must conform to '
                     'ISO3166 ALPHA-2)?\n'
                     'Country code: ')
        },
        'localityName': {
            'text': ('the city/town/borough/locality name?\n'
                     'Locality: ')
        },
        'stateOrProvinceName': {
            'text': ('the state/region name (full string)?\n'
                     'Region: ')
        },
        'organization': {
            'text': ('your organization\'s name?\n'
                     'Organization: ')
        },
        'organizationalUnitName': {
            'text': ('your department/role/team name?\n'
                     'Organizational Unit: ')
        },
        'emailAddress': {
            'text': ('the email address to be associated with this '
                     'certificate/PKI object?\n'
                     'Email: ')
        }
    }
}
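Each attrib dict above pairs a text/prompt/options/default tuple with one configurable value. A minimal sketch of the sort of loop that could consume such a spec (the ask() helper is an assumption, not BDisk's actual confgen code):

    # A sketch only; 'spec' is one of the attrib dicts above,
    # e.g. ssl['attribs']['key']['keysize'].
    def ask(spec):
        print(spec['text'])
        answer = input(spec['prompt']).strip()
        # (a real implementation would use getpass.getpass() for the
        # passphrase prompt, since that text promises no echo)
        if not answer:
            return spec['default']  # empty input falls back to the default
        if spec['options'] and answer not in spec['options']:
            print('Invalid value; using default ({0}).'.format(spec['default']))
            return spec['default']
        return answer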
2	bdisk/sync.py (Normal file)
@ -0,0 +1,2 @@
import shutil
import subprocess
1156	bdisk/utils.py (Normal file)
File diff suppressed because it is too large
1	bdisk/version.py (Normal file)
@ -0,0 +1 @@
BDISK_VERSION = '4.0.0a1'
5	bin/bdisk.py (Normal file)
@ -0,0 +1,5 @@
#!/usr/bin/env python3.6

# PLACEHOLDER - this will be a thin wrapper installed to /usr/bin/bdisk.
import argparse
import bdisk
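Since the wrapper is still a placeholder, here is a hedged sketch of the thin CLI it might grow into (the --config flag and the bdisk.main() entry point are assumptions, not the final interface):

    #!/usr/bin/env python3.6
    # A sketch only; the real wrapper's flags and entry point are not final.
    import argparse
    import bdisk

    def main():
        parser = argparse.ArgumentParser(
            description = 'BDisk - a GNU/Linux live distribution builder')
        parser.add_argument('-c', '--config',
                            default = '~/.config/bdisk.xml',
                            help = 'path to the BDisk XML configuration file')
        args = parser.parse_args()
        bdisk.main(args.config)  # hypothetical entry point

    if __name__ == '__main__':
        main()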
4	bin/bdiskcfg.py (Normal file)
@ -0,0 +1,4 @@
#!/usr/bin/env python3.6

import argparse
import bdisk.confgen as confgen
3	bin/xmllint.sh (Executable file)
@ -0,0 +1,3 @@
#!/bin/bash

xmllint -schema /opt/dev/bdisk/bdisk/bdisk.xsd /opt/dev/bdisk/docs/examples/multi_profile.xml --noout
12	docs/CREDITS
@ -1,12 +0,0 @@
iPXE:
Thanks to "eworm" for his work on the AUR iPXE-git package:
https://aur.archlinux.org/packages/ipxe-git/

and specifically the following patches:
http://www.eworm.de/download/linux/ipxe-0001-git-version.patch
http://www.eworm.de/download/linux/ipxe-0002-banner.patch
http://www.eworm.de/download/linux/ipxe-0003-iso-efi.patch
http://www.eworm.de/download/linux/ipxe-0004-fix-no-pie-workaround.patch


thanks to jthan, even though he drives me batty sometimes.
33	docs/FAQ
@ -1,33 +0,0 @@
BDisk Frequently Asked(/Unasked) Questions



0.) Why does it take so long to build?
1.) Why is the generated ISO file so big?
2.) How do I find the version/release/etc. number of an ISO?


=========================================================



0.) WHY DOES IT TAKE SO LONG TO BUILD?
A: This typically occurs when you're building from within a LiveCD/LiveUSB situation, in a VM/container/etc., or on a headless server.
If this is the case, you may run into what appears to be "stalling", especially while keys are generating for the chroots.
Thankfully, there is an easy fix. You can install the "haveged"(http://www.issihosts.com/haveged/) software and run it. This will
show an immediate and non-negligible improvement for the above contexts. If you have extra power to throw at it (or are using a dedicated build box)
as well, I recommend enabling I_AM_A_RACECAR in your build.conf. BDisk will then be more aggressive with its resource consumption.


1.) WHY IS THE GENERATED ISO FILE SO BIG?
A: You may have enabled a LOT of packages in extra/packages.(32|64|both). Or you're using the default set of packages, which tries to include a LOT
of different (and in some cases, redundant) packages for widespread utilization and usage. In addition, keep in mind that BDisk builds a single ISO
that can be used on both i686 architectures AND full x86_64 architectures ("AMD64" as you may sometimes see it referenced). Because it doesn't cheat
and just use a 64-bit kernel with a 32-bit userland, it needs two different squash images on each ISO- one for 32-bit userland and one for 64-bit
userland.

2.) HOW DO I FIND THE VERSION/RELEASE/ETC. NUMBER OF AN ISO?
A: This can be found in a multitude of places. The full-size ISO file (iso/<distname>-<git tag>-<git rev number>-(32|64|any).iso) should have the
version right in the file name. If you want more detailed information (or perhaps you renamed the file), you can mount the ISO as loopback in GNU/Linux,
*BSD, or Mac OS X and check /path/to/mounted/iso/VERSION_INTO.txt. Lastly, within the runtime itself (especially handy if booting via iPXE), you can
check /root/VERSION_INFO.txt within the running live environment.
@ -1,8 +0,0 @@
Please see the full documentation at https://bdisk.square-r00t.net

Alternatively, you can compile the manual yourself (requires asciidoc and asciidoctor):

cd docs/manual
asciidoctor -o /tmp/README.html HEAD.adoc

Then point your browser to /tmp/README.html
@ -1 +0,0 @@
CREDITS
60	docs/TODO
@ -1,60 +0,0 @@
-investigate weird signing issue- if you specify a key to sign with, it appears that the squashed images (at least in the http dir) doesn't have a sig/asc. do they need to copy it over? or is it not even signing them?
-switch from python-pygpgme to python-gpgme for better performance. also clean up bGPG in general; reference KANT.
-more pythonic! classes (because inits help), use list or tuple constant for checksums, try vars-ing the configparser stuff (and move defaults to in-code?),
change path combinations to use os.path.join etc.
-modularity: https://stackoverflow.com/a/8719100

-mtree-like functionality; if mtree spec is found, apply that to files in overlay (or chroot even); otherwise copy from overlay and don't touch chroot

-i_am_a_racecar optimizations
- different distro guests (debian, etc.)- https://stackoverflow.com/questions/2349991/python-how-to-import-other-python-files/20749411#20749411
-incorporate this into the manual?
-TFTP configs generated for pxelinux
-fix the branding, etc. on ipxe. :(
-add ipxe to full iso maybe?
-include benchmarking
-- http://sourceforge.net/projects/unixbench/
-- https://code.google.com/p/byte-unixbench/
-- https://github.com/akopytov/sysbench
-- (http://blog.due.io/2014/linode-digitalocean-and-vultr-comparison/ etc.)
-There *has* to be a better way of handling package installation in the chroots.
--implement pyalpm to decreate dependency on chroot pacman-ing?
--or even maybe https://wiki.archlinux.org/index.php/offline_installation_of_packages in pure python!
-set up automatic exporting to PDF of the user manual server-side. https://pypi.python.org/pypi/unoconv/0.6
-maybe remove lxde, firefox, chrome and replace with enlightenment/midori?
-custom repo? https://brainwreckedtech.wordpress.com/2013/01/27/making-your-own-arch-linux-repository/
--https://wiki.archlinux.org/index.php/Building_32-bit_packages_on_a_64-bit_system # NOTE: arch has dropped i686, now continued as archlinux32
-implement better "additional" packages list. specify for path in build.ini- these should be more easily changed by end users. DON'T TOUCH iso.pkgs.lst since those are necessary for booting.
-automatic shorewall/some other firewall?
-autodetection/configuration of network. DHCP is currently running by default, but does it need to support IPv6? if so, how would the user configure their network?
-DISABLE NETWORKMANAGER AND "fi.w1.wpa_supplicant1"??? keeps spawning wpa_supplicant (and thusly killing networking proper)
-for netboot, custom user agent (should be defined by build.ini)
--iPXE's curl
--initrd's curl
-WISH: Better logging/debugging
https://web.archive.org/web/20170726052946/http://www.lexev.org/en/2013/python-logging-every-day/

-WISH: signing for secureboot releases (PreLoader and loader.efi handle this okay, but require manual intervention)
-does loader.efi support splash backgrounds? can i implement that differently somehow?
--yes, see e.g. https://www.reddit.com/r/archlinux/comments/3bwgf0/where_put_the_splasharchbmp_to_splash_screen_boot/
-strip out/remove unnecessary and orphan packages (e.g. gcc, make, automake, etc.) before building ISO
-incorporate iPXE tweaks:
--http://ipxe.org/crypto
--http://ipxe.org/cmd/imgtrust
--http://ipxe.org/cmd/imgverify
--enable use of custom CA/self-signed certs for HTTPS etc.
--signed kernel and initrd for ipxe:
---#imgtrust --permanent
---#imgverify vmlinuz path/to/vmlinuz.sig
---#imgverify initrd path/to/initrd.sig
---DONE, partially. need to incorporate codesign certs/keys. routines, conf variables
-enable mirror= kernel commandline.
-NOTE: Following should be implemented via AIF-NG (https://git.square-r00t.net/AIF-NG, work pending for fix to BDisk for i686/x86_64 split)
--if mirror_(NAME) is present, use that as repo name.
--if it starts with /, treat as mirrorlist (Include); otherwise use Server =
--if it has mirror_SIG-X, set signature options e.g. _SIG-N would be "SigLevel = Never"
-iPXE background support. sed -rf "${BASEDIR}/src/ipxe_local/script.sed" ${SRCDIR}/ipxe/src/config/general.h ; sed -rf "${BASEDIR}/src/ipxe_local/script2.sed" ${SRCDIR}/ipxe/src/config/console.h
--note that iPXE VESAFB console is not (yet) supported in EFI, so this is on hold. check into this to see if it has changed.
-include WinMTR, build Mac OS X MTR for dist/tools on CD
-include pre-compiled LibreCrypt for opening LUKS parts on Windows (https://github.com/t-d-k/LibreCrypt)
--curl -s https://raw.githubusercontent.com/t-d-k/LibreCrypt/master/README.md | egrep 'InstallLibreCrypt_v[A-Za-z0-9\.]*.exe' | cut -f2 -d'"'
288	docs/examples/multi_profile.xml (Normal file)
@ -0,0 +1,288 @@
<?xml version='1.0' encoding='UTF-8'?>
<bdisk xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://bdisk.square-r00t.net/" xsi:schemaLocation="http://bdisk.square-r00t.net bdisk.xsd">
    <profile name="default" id="1" uuid="8cdd6bcb-c147-4a63-9779-b5433c510dbc">
        <meta>
            <names>
                <name>BDISK</name>
                <!--<name>{xpath%../uxname/text()}</name>-->
                <uxname>bdisk</uxname>
                <!-- Just like with previous versions of BDisk, you can reference other values...
                     but now with the neat benefits of XPath! Everything you could do in build.ini's and more.
                     See https://www.w3schools.com/xml/xpath_syntax.asp
                     If you need a literal curly brace, double them (e.g. for "{foo}", use "{{foo}}"),
                     UNLESS it's in a <regexes><pattern> as part of the expression. Those are taken as literal strings. -->
                <pname>{xpath%../name/text()}</pname>
            </names>
            <desc>A rescue/restore live environment.</desc>
            <dev>
                <author>A. Dev Eloper</author>
                <email>dev@domain.tld</email>
                <website>https://domain.tld/~dev</website>
            </dev>
            <uri>https://domain.tld/projname</uri>
            <ver>1.0.0</ver>
            <!-- This is the VERY FIRST value parsed, and is required. It controls how many levels of {xpath%...} to recurse. -->
            <!-- If the maximum level is reached, the substitution will evaluate as blank. -->
            <max_recurse>5</max_recurse>
            <!-- You need to store regex patterns here and reference them in a special way later, and it's only valid for certain
                 items. See the manual for more information. NO btags within the patterns is allowed. -->
            <regexes>
                <pattern id="tarball_x86_64">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-x86_64\.tar\.gz$</pattern>
                <pattern id="sig_x86_64">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-x86_64\.tar\.gz\.sig$</pattern>
                <pattern id="tarball_i686">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-i686\.tar\.gz$</pattern>
                <pattern id="sig_i686">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-i686\.tar\.gz\.sig$</pattern>
            </regexes>
            <!-- You can also define variables. NO xpath or regex btags, and they can't be used within other btags! -->
            <variables>
                <variable id="bdisk_root">/var/tmp/BDisk</variable>
            </variables>
        </meta>
        <accounts>
            <!-- Salted/hashed password is "test" -->
            <rootpass hashed="true">$6$7KfIdtHTcXwVrZAC$LZGNeMNz7v5o/cYuA48FAxtZynpIwO5B1CPGXnOW5kCTVpXVt4SypRqfM.AoKkFt/O7MZZ8ySXJmxpELKmdlF1</rootpass>
            <user sudo="true">
                <username>{xpath%//meta/names/uxname/text()}</username>
                <!-- You can also use substitution from different profiles in this same configuration: -->
                <!-- <username>{xpath%//profile[@name='another_profile']/meta/names/uxname"}</username> -->
                <comment>{xpath%//meta/dev/author/text()}</comment>
                <password hashed="false" hash_algo="sha512" salt="auto">testpassword</password>
            </user>
            <user sudo="false">
                <username>testuser</username>
                <comment>Test User</comment>
                <password hashed="false" hash_algo="sha512" salt="auto">anothertestpassword</password>
            </user>
        </accounts>
        <sources>
            <source arch="x86_64">
                <mirror>http://archlinux.mirror.domain.tld</mirror>
                <rootpath>/iso/latest</rootpath>
                <tarball flags="regex latest">{regex%tarball_x86_64}</tarball>
                <checksum hash_algo="sha1" explicit="false" flags="latest">sha1sums.txt</checksum>
                <sig keys="7F2D434B9741E8AC" keyserver="hkp://pool.sks-keyservers.net" flags="regex latest">{regex%sig_x86_64}</sig>
            </source>
            <source arch="i686">
                <mirror>http://archlinux32.mirror.domain.tld</mirror>
                <rootpath>/iso/latest</rootpath>
                <tarball flags="regex latest">{regex%tarball_i686}</tarball>
                <checksum hash_algo="sha512" explicit="true">cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e</checksum>
                <sig keys="248BF41F9BDD61D41D060AE774EDA3C6B06D0506" keyserver="hkp://pool.sks-keyservers.net" flags="regex latest">{regex%sig_i686}</sig>
            </source>
        </sources>
        <build its_full_of_stars="true">
            <paths>
                <base>{variable%bdisk_root}/base</base>
                <cache>{variable%bdisk_root}/cache</cache>
                <chroot>{variable%bdisk_root}/chroots</chroot>
                <overlay>{variable%bdisk_root}/overlay</overlay>
                <templates>{variable%bdisk_root}/templates</templates>
                <mount>/mnt/{xpath%//meta/names/uxname/text()}</mount>
                <distros>{variable%bdisk_root}/distros</distros>
                <dest>{variable%bdisk_root}/results</dest>
                <iso>{variable%bdisk_root}/iso_overlay</iso>
                <http>{variable%bdisk_root}/http</http>
                <tftp>{variable%bdisk_root}/tftp</tftp>
                <pki>{variable%bdisk_root}/pki</pki>
            </paths>
            <basedistro>archlinux</basedistro>
        </build>
        <iso sign="true" multi_arch="true"/>
        <ipxe sign="true" iso="true">
            <uri>{xpath%//meta/dev/website/text()}/ipxe</uri>
        </ipxe>
        <pki overwrite="false">
            <!-- http://ipxe.org/crypto -->
            <ca>
                <cert hash_algo="sha512">{xpath%../../../build/paths/pki/text()}/ca.crt</cert>
                <!-- If csr is self-enclosed (<csr />), we'll just generate and use a CSR in-memory.
                     Assuming we need to generate a certificate, anyways.
                     If you want to write it out to disk (for debugging, etc.) OR use one already generated,
                     then provide a path.
                     e.g.:
                     <csr>{xpath%build/paths/ssl/text()}/ca.csr</csr> -->
                <csr/>
                <!-- If you use an index file (or want to) to serialize client certificates, specify it here. -->
                <!-- It must conform to CADB spec (https://pki-tutorial.readthedocs.io/en/latest/cadb.html). -->
                <!-- You should probably also specify a serial file if so. -->
                <!-- Both of these are entirely optional if you aren't using an existing PKI. -->
                <index>{xpath%../../../build/paths/pki/text()}/index.txt</index>
                <serial>{xpath%../../../build/paths/pki/text()}/serial</serial>
                <!-- If you specify a cipher, the key will be encrypted to the passphrase provided by the passphrase attribute.
                     If the key is encrypted (either a pre-existing or a created one) but passphrase is not provided, you will
                     be (securely) prompted for the passphrase to unlock it/add a passphrase to it. -->
                <key cipher="none" passphrase="none" keysize="4096">{xpath%../../../build/paths/pki/text()}/ca.key</key>
                <subject>
                    <commonName>domain.tld</commonName>
                    <countryName>XX</countryName>
                    <localityName>Some City</localityName>
                    <stateOrProvinceName>Some State</stateOrProvinceName>
                    <organization>Some Org, Inc.</organization>
                    <organizationalUnitName>Department Name</organizationalUnitName>
                    <emailAddress>{xpath%../../../../meta/dev/email/text()}</emailAddress>
                </subject>
            </ca>
            <client>
                <cert hash_algo="sha512">{xpath%../../../build/paths/pki/text()}/{xpath%../../../meta/names/uxname/text()}.crt</cert>
                <csr/>
                <key cipher="none" passphrase="none" keysize="4096">{xpath%//build/paths/pki/text()}/{xpath%../../../meta/names/uxname/text()}.key</key>
                <subject>
                    <commonName>website.tld</commonName>
                    <countryName>XX</countryName>
                    <localityName>Some City</localityName>
                    <stateOrProvinceName>Some State</stateOrProvinceName>
                    <organization>Some Org, Inc.</organization>
                    <organizationalUnitName>Department Name</organizationalUnitName>
                    <emailAddress>{xpath%../../../../meta/dev/email/text()}</emailAddress>
                </subject>
            </client>
        </pki>
        <!-- If prompt_passphrase is false and passphrase attribute is not given for a gpg element, we will try to use a
             blank passphrase for all operations. -->
        <gpg keyid="none" gnupghome="none" publish="false" prompt_passphrase="false">
            <!-- The below is only used if we are generating a key (i.e. keyid="none"). -->
            <key algo="rsa" keysize="4096" expire="0">
                <name>{xpath%../../../meta/dev/author/text()}</name>
                <email>{xpath%../../../meta/dev/email/text()}</email>
                <!-- If present, the subkey element will create a secondary key used *only* for signing. This is good security practice. Obviously, this is only used if we are creating a new (master) key. -->
                <subkey algo="ed" keysize="25519" expire="0"/>
                <comment>for {xpath%../../../meta/names/pname/text()} [autogenerated] | {xpath%../../../meta/uri/text()} | {xpath%../../../meta/desc/text()}</comment>
            </key>
        </gpg>
        <sync>
            <!-- ipxe includes the http directory. or should, anyways. -->
            <ipxe enabled="true">/srv/http/{xpath%../../meta/names/uxname/text()}</ipxe>
            <tftp enabled="true">/tftproot/{xpath%../../meta/names/uxname/text()}</tftp>
            <iso enabled="true">/srv/http/isos/{xpath%../../meta/names/uxname/text()}</iso>
            <gpg enabled="true" format="asc">/srv/http/{xpath%../../meta/names/uxname/text()}/pubkey.asc</gpg>
            <rsync enabled="true">
                <user>root</user>
                <host>mirror.domain.tld</host>
                <port>22</port>
                <pubkey>~/.ssh/id_ed25519</pubkey>
            </rsync>
        </sync>
    </profile>
    <profile name="alternate" id="2" uuid="2ed07c19-2071-4d66-8569-da40475ba716">
        <meta>
            <names>
                <name>ALTCD</name>
                <uxname>bdisk_alt</uxname>
                <pname>{xpath%../name/text()}</pname>
            </names>
            <desc>Another rescue/restore live environment.</desc>
            <dev>
                <author>Another Dev Eloper</author><!-- You can reference other profiles within the same configuration. -->
                <email>{xpath%//profile[@name="default"]/meta/dev/email/text()}</email>
                <website>{xpath%//profile[@name="default"]/meta/dev/website/text()}</website>
            </dev>
            <uri>https://domain.tld/projname</uri>
            <ver>0.0.1</ver>
            <max_recurse>5</max_recurse>
            <regexes>
                <pattern id="tarball_x86_64">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-x86_64\.tar\.gz$</pattern>
                <pattern id="sig_x86_64">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-x86_64\.tar\.gz\.sig$</pattern>
                <pattern id="tarball_i686">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-i686\.tar\.gz$</pattern>
                <pattern id="sig_i686">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-i686\.tar\.gz\.sig$</pattern>
            </regexes>
            <variables>
                <variable id="bdisk_root">/var/tmp/BDisk</variable>
            </variables>
        </meta>
        <accounts>
            <rootpass hashed="false">atotallyinsecurepassword</rootpass>
            <user sudo="false">
                <username>testuser</username>
                <comment>Test User</comment>
                <password hashed="false" hash_algo="sha512" salt="auto">atestpassword</password>
            </user>
        </accounts>
        <sources>
            <source arch="x86_64">
                <mirror>http://archlinux.mirror.domain.tld</mirror>
                <rootpath>/iso/latest</rootpath>
                <tarball flags="regex latest">{regex%tarball_x86_64}</tarball>
                <checksum hash_algo="sha1" explicit="false" flags="latest">sha1sums.txt</checksum>
                <sig keys="7F2D434B9741E8AC" keyserver="hkp://pool.sks-keyservers.net" flags="regex latest">{regex%sig_x86_64}</sig>
            </source>
            <source arch="i686">
                <mirror>http://archlinux32.mirror.domain.tld</mirror>
                <rootpath>/iso/latest</rootpath>
                <tarball flags="regex latest">{regex%tarball_i686}</tarball>
                <checksum hash_algo="sha512" explicit="true">cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e</checksum>
                <sig keys="248BF41F9BDD61D41D060AE774EDA3C6B06D0506" keyserver="hkp://pool.sks-keyservers.net" flags="regex latest">{regex%sig_i686}</sig>
            </source>
        </sources>
        <build its_full_of_stars="true">
            <paths>
                <base>{variable%bdisk_root}/base</base>
                <cache>{variable%bdisk_root}/cache</cache>
                <chroot>{variable%bdisk_root}/chroots</chroot>
                <overlay>{variable%bdisk_root}/overlay</overlay>
                <templates>{variable%bdisk_root}/templates</templates>
                <mount>/mnt/{xpath%//meta/names/uxname/text()}</mount>
                <distros>{variable%bdisk_root}/distros</distros>
                <dest>{variable%bdisk_root}/results</dest>
                <iso>{variable%bdisk_root}/iso_overlay</iso>
                <http>{variable%bdisk_root}/http</http>
                <tftp>{variable%bdisk_root}/tftp</tftp>
                <pki>{variable%bdisk_root}/pki</pki>
            </paths>
            <basedistro>archlinux</basedistro>
        </build>
        <iso sign="true" multi_arch="true"/>
        <ipxe sign="true" iso="true">
            <uri>{xpath%//meta/dev/website/text()}/ipxe</uri>
        </ipxe>
        <pki overwrite="false">
            <ca>
                <cert hash_algo="sha512">{xpath%../../../build/paths/pki/text()}/ca.crt</cert>
                <csr/>
                <index>{xpath%../../../build/paths/pki/text()}/index.txt</index>
                <serial>{xpath%../../../build/paths/pki/text()}/serial</serial>
                <key cipher="none" passphrase="none" keysize="4096">{xpath%../../../build/paths/pki/text()}/ca.key</key>
                <subject>
                    <commonName>domain.tld</commonName>
                    <countryName>XX</countryName>
                    <localityName>Some City</localityName>
                    <stateOrProvinceName>Some State</stateOrProvinceName>
                    <organization>Some Org, Inc.</organization>
                    <organizationalUnitName>Department Name</organizationalUnitName>
                    <emailAddress>{xpath%../../../../meta/dev/email/text()}</emailAddress>
                </subject>
            </ca>
            <client>
                <cert hash_algo="sha512">{xpath%../../../build/paths/pki/text()}/{xpath%../../../meta/names/uxname/text()}.crt</cert>
                <csr/>
                <key cipher="none" passphrase="none" keysize="4096">{xpath%//build/paths/pki/text()}/{xpath%../../../meta/names/uxname/text()}.key</key>
                <subject>
                    <commonName>website.tld</commonName>
                    <countryName>XX</countryName>
                    <localityName>Some City</localityName>
                    <stateOrProvinceName>Some State</stateOrProvinceName>
                    <organization>Some Org, Inc.</organization>
                    <organizationalUnitName>Department Name</organizationalUnitName>
                    <emailAddress>{xpath%../../../../meta/dev/email/text()}</emailAddress>
                </subject>
            </client>
        </pki>
        <gpg keyid="none" gnupghome="none" publish="false" prompt_passphrase="false">
            <key algo="rsa" keysize="4096" expire="0">
                <name>{xpath%../../../meta/dev/author/text()}</name>
                <email>{xpath%../../../meta/dev/email/text()}</email>
                <comment>for {xpath%../../../meta/names/pname/text()} [autogenerated] | {xpath%../../../meta/uri/text()} | {xpath%../../../meta/desc/text()}</comment>
            </key>
        </gpg>
        <sync>
            <ipxe enabled="true">/srv/http/{xpath%../../meta/names/uxname/text()}</ipxe>
            <tftp enabled="true">/tftproot/{xpath%../../meta/names/uxname/text()}</tftp>
            <iso enabled="true">/srv/http/isos/{xpath%../../meta/names/uxname/text()}</iso>
            <gpg enabled="true" format="asc">/srv/http/{xpath%../../meta/names/uxname/text()}/pubkey.asc</gpg>
            <rsync enabled="true">
                <user>root</user>
                <host>mirror.domain.tld</host>
                <port>22</port>
                <pubkey>~/.ssh/id_ed25519</pubkey>
            </rsync>
        </sync>
    </profile>
</bdisk>
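For a sense of how the {xpath%...} btags above could be resolved, here is a hedged sketch using lxml (the btag regex, the recursion handling, and the blank-on-max-recurse behaviour are inferred from the comments in the profile, not taken from BDisk's actual substitution code):

    import re
    from lxml import etree

    BTAG = re.compile(r'\{xpath%(?P<expr>[^}]+)\}')

    def resolve(element, text, depth = 0, max_recurse = 5):
        # Substitute {xpath%...} btags relative to 'element', recursing up
        # to max_recurse levels (per <max_recurse/>); if the maximum level
        # is reached, remaining substitutions evaluate as blank.
        if depth >= max_recurse:
            return BTAG.sub('', text)
        def _sub(match):
            results = element.xpath(match.group('expr'))
            return str(results[0]) if results else ''
        new_text = BTAG.sub(_sub, text)
        if BTAG.search(new_text):
            return resolve(element, new_text, depth + 1, max_recurse)
        return new_text

For example, resolve(pname_element, '{xpath%../name/text()}') would evaluate the XPath against the element's parent and return 'BDISK' for the default profile.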
84	docs/examples/regen_multi.py (Executable file)
@ -0,0 +1,84 @@
#!/usr/bin/env python3.6

import copy
from lxml import etree, objectify

#parser = etree.XMLParser(remove_blank_text = True)
parser = etree.XMLParser(remove_blank_text = False)

# We need to append to a new root because you can't edit nsmap, and you can't
# xpath on an element with a naked namespace (e.g. 'xlmns="..."').
ns = {None: 'http://bdisk.square-r00t.net/',
      'xsi': 'http://www.w3.org/2001/XMLSchema-instance'}
xsi = {'{http://www.w3.org/2001/XMLSchema-instance}schemaLocation':
       'http://bdisk.square-r00t.net bdisk.xsd'}
new_cfg = etree.Element('bdisk', nsmap = ns, attrib = xsi)
new_cfg.text = '\n '

with open('single_profile.xml', 'rb') as f:
    xml = etree.fromstring(f.read(), parser)

roottree = xml.getroottree()
for elem in roottree.getiterator():
    if not hasattr(elem.tag, 'find'):
        continue
    i = elem.tag.find('}')
    if i >= 0:
        elem.tag = elem.tag[i + 1:]
objectify.deannotate(roottree, cleanup_namespaces = True)

single_profile = xml.xpath('/bdisk/profile[1]')[0]
alt_profile = copy.deepcopy(single_profile)
for c in alt_profile.xpath('//comment()'):
    p = c.getparent()
    p.remove(c)

# Change the profile identifiers
alt_profile.attrib['name'] = 'alternate'
alt_profile.attrib['id'] = '2'
alt_profile.attrib['uuid'] = '2ed07c19-2071-4d66-8569-da40475ba716'

meta_tags = {'name': 'ALTCD',
             'uxname': 'bdisk_alt',
             'pname': '{xpath%../name/text()}',
             'desc': 'Another rescue/restore live environment.',
             'author': 'Another Dev Eloper',
             'email': '{xpath%//profile[@name="default"]/meta/dev/email/text()}',
             'website': '{xpath%//profile[@name="default"]/meta/dev/website/text()}',
             'ver': '0.0.1'}
# Change the names
meta = alt_profile.xpath('/profile/meta')[0]
for e in meta.iter():
    if e.tag in meta_tags:
        e.text = meta_tags[e.tag]

accounts_tags = {'rootpass': 'atotallyinsecurepassword',
                 'username': 'testuser',
                 'comment': 'Test User',
                 'password': 'atestpassword'}
accounts = alt_profile.xpath('/profile/accounts')[0]
for e in accounts.iter():
    if e.tag in accounts_tags:
        e.text = accounts_tags[e.tag]
    if e.tag == 'rootpass':
        e.attrib['hashed'] = 'false'
    elif e.tag == 'user':
        e.attrib['sudo'] = 'false'
# Delete the second user
accounts.remove(accounts[2])
author = alt_profile.xpath('/profile/meta/dev/author')[0]
author.addnext(etree.Comment(
    ' You can reference other profiles within the same configuration. '))
#xml.append(alt_profile)

for child in xml.xpath('/bdisk/profile'):
    new_cfg.append(copy.deepcopy(child))
new_cfg.append(alt_profile)

with open('multi_profile.xml', 'wb') as f:
    f.write(etree.tostring(new_cfg,
                           pretty_print = True,
                           encoding = 'UTF-8',
                           xml_declaration = True))
55	docs/examples/simple_profile.xml (Normal file)
@ -0,0 +1,55 @@
<?xml version="1.0" encoding="UTF-8" ?>
<bdisk>
    <profile name="simple" id="1" uuid="7b9128d2-0ba5-4302-9b3c-9951401853e5">
        <meta>
            <names>
                <name>BDISK</name>
                <uxname>BDisk</uxname>
                <pname>BDisk</pname>
            </names>
            <desc>A rescue/restore live environment.</desc>
            <dev>
                <author>A. Dev Eloper</author>
                <email>dev@domain.tld</email>
                <website>https://domain.tld/~dev</website>
            </dev>
            <uri>https://domain.tld/projname</uri>
            <ver>1.0.0</ver>
            <max_recurse>5</max_recurse>
            <regexes/>
            <variables/>
        </meta>
        <accounts>
            <rootpass hashed="no" hash_algo="sha512" salt="auto">testpassword</rootpass>
        </accounts>
        <sources>
            <source arch="x86_64">
                <mirror>http://archlinux.mirror.domain.tld</mirror>
                <rootpath>/iso/latest</rootpath>
                <tarball>archlinux-bootstrap-2018.05.01-x86_64.tar.gz</tarball>
                <checksum/>
                <sig/>
            </source>
        </sources>
        <build its_full_of_stars="no">
            <paths>
                <base>/tmp/bdisk/base</base>
                <cache>/tmp/bdisk/cache</cache>
                <chroot>/tmp/bdisk/chroots</chroot>
                <overlay>/tmp/bdisk/overlay</overlay>
                <templates>/tmp/bdisk/templates</templates>
                <mount>/mnt/bdisk</mount>
                <distros>/tmp/bdisk/distros</distros>
                <dest>/tmp/bdisk/results</dest>
                <iso>/tmp/bdisk/iso_overlay</iso>
                <http>/tmp/bdisk/http</http>
                <tftp>/tmp/bdisk/tftp</tftp>
                <pki>/tmp/bdisk/pki</pki>
            </paths>
            <basedistro>archlinux</basedistro>
        </build>
        <iso sign="no" multi_arch="no" />
        <gpg/>
        <sync/>
    </profile>
</bdisk>
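The profile above omits the namespace and schemaLocation that the other examples declare, but for the namespaced profiles the same validation that bin/xmllint.sh performs can be done from Python with lxml; a minimal sketch (the relative paths are assumptions):

    from lxml import etree

    # Validate a profile configuration against the BDisk schema, mirroring
    # what bin/xmllint.sh does with `xmllint -schema ... --noout`.
    schema = etree.XMLSchema(etree.parse('bdisk/bdisk.xsd'))
    doc = etree.parse('docs/examples/multi_profile.xml')
    schema.assertValid(doc)  # raises etree.DocumentInvalid on failure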
188	docs/examples/single_profile.xml (Normal file)
@ -0,0 +1,188 @@
<?xml version="1.0" encoding="UTF-8" ?>
<bdisk xmlns="http://bdisk.square-r00t.net/"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xsi:schemaLocation="http://bdisk.square-r00t.net bdisk.xsd">
    <profile name="default" id="1" uuid="8cdd6bcb-c147-4a63-9779-b5433c510dbc">
        <meta>
            <names>
                <name>BDISK</name>
                <!--<name>{xpath%../uxname/text()}</name>-->
                <uxname>bdisk</uxname>
                <!-- Just like with previous versions of BDisk, you can reference other values...
                     but now with the neat benefits of XPath! Everything you could do in build.ini's and more.
                     See https://www.w3schools.com/xml/xpath_syntax.asp
                     If you need a literal curly brace, double them (e.g. for "{foo}", use "{{foo}}"),
                     UNLESS it's in a <regexes><pattern> as part of the expression. Those are taken as literal strings. -->
                <pname>{xpath%../name/text()}</pname>
            </names>
            <desc>A rescue/restore live environment.</desc>
            <dev>
                <author>A. Dev Eloper</author>
                <email>dev@domain.tld</email>
                <website>https://domain.tld/~dev</website>
            </dev>
            <uri>https://domain.tld/projname</uri>
            <ver>1.0.0</ver>
            <!-- This is the VERY FIRST value parsed, and is required. It controls how many levels of {xpath%...} to recurse. -->
            <!-- If the maximum level is reached, the substitution will evaluate as blank. -->
            <max_recurse>5</max_recurse>
            <!-- You need to store regex patterns here and reference them in a special way later, and it's only valid for certain
                 items. See the manual for more information. NO btags within the patterns is allowed. -->
            <regexes>
                <pattern id="tarball_x86_64">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-x86_64\.tar\.gz$</pattern>
                <pattern id="sig_x86_64">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-x86_64\.tar\.gz\.sig$</pattern>
                <pattern id="tarball_i686">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-i686\.tar\.gz$</pattern>
                <pattern id="sig_i686">archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-i686\.tar\.gz\.sig$</pattern>
            </regexes>
            <!-- You can also define variables. NO xpath or regex btags, and they can't be used within other btags! -->
            <variables>
                <variable id="bdisk_root">/var/tmp/BDisk</variable>
            </variables>
        </meta>
        <accounts>
            <!-- Salted/hashed password is "test" -->
            <rootpass hashed="true">$6$7KfIdtHTcXwVrZAC$LZGNeMNz7v5o/cYuA48FAxtZynpIwO5B1CPGXnOW5kCTVpXVt4SypRqfM.AoKkFt/O7MZZ8ySXJmxpELKmdlF1</rootpass>
            <user sudo="true">
                <username>{xpath%../../../meta/names/uxname/text()}</username>
                <!-- You can also use substitution from different profiles in this same configuration: -->
                <!-- <username>{xpath%//profile[@name='another_profile']/meta/names/uxname"}</username> -->
                <comment>{xpath%../../../meta/dev/author/text()}</comment>
                <password hashed="false"
                          hash_algo="sha512"
                          salt="auto">testpassword</password>
            </user>
            <user sudo="false">
                <username>testuser</username>
                <comment>Test User</comment>
                <password hashed="false"
                          hash_algo="sha512"
                          salt="auto">anothertestpassword</password>
            </user>
        </accounts>
        <sources>
            <source arch="x86_64">
                <mirror>http://archlinux.mirror.domain.tld</mirror>
                <rootpath>/iso/latest</rootpath>
                <tarball flags="regex latest">{regex%tarball_x86_64}</tarball>
                <checksum hash_algo="sha1"
                          explicit="false"
                          flags="latest">sha1sums.txt</checksum>
                <sig keys="7F2D434B9741E8AC"
                     keyserver="hkp://pool.sks-keyservers.net"
                     flags="regex latest">{regex%sig_x86_64}</sig>
            </source>
            <source arch="i686">
                <mirror>http://archlinux32.mirror.domain.tld</mirror>
                <rootpath>/iso/latest</rootpath>
                <tarball flags="regex latest">{regex%tarball_i686}</tarball>
                <checksum hash_algo="sha512"
                          explicit="true">cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e</checksum>
                <sig keys="248BF41F9BDD61D41D060AE774EDA3C6B06D0506"
                     keyserver="hkp://pool.sks-keyservers.net"
                     flags="regex latest">{regex%sig_i686}</sig>
            </source>
        </sources>
        <packages>
            <package repo="core">openssh</package>
        </packages>
        <build its_full_of_stars="true">
            <paths>
                <base>{variable%bdisk_root}/base</base>
                <cache>{variable%bdisk_root}/cache</cache>
                <chroot>{variable%bdisk_root}/chroots</chroot>
                <overlay>{variable%bdisk_root}/overlay</overlay>
                <templates>{variable%bdisk_root}/templates</templates>
                <mount>/mnt/{xpath%//meta/names/uxname/text()}</mount>
                <distros>{variable%bdisk_root}/distros</distros>
                <dest>{variable%bdisk_root}/results</dest>
                <iso>{variable%bdisk_root}/iso_overlay</iso>
                <http>{variable%bdisk_root}/http</http>
                <tftp>{variable%bdisk_root}/tftp</tftp>
                <pki>{variable%bdisk_root}/pki</pki>
            </paths>
            <basedistro>archlinux</basedistro>
        </build>
        <iso sign="true" multi_arch="true"/>
        <ipxe sign="true" iso="true">
            <uri>{xpath%//meta/dev/website/text()}/ipxe</uri>
        </ipxe>
        <pki overwrite="false">
            <!-- http://ipxe.org/crypto -->
            <ca>
                <cert hash_algo="sha512">{xpath%../../../build/paths/pki/text()}/ca.crt</cert>
                <!-- If csr is self-enclosed (<csr />), we'll just generate and use a CSR in-memory.
                     Assuming we need to generate a certificate, anyways.
                     If you want to write it out to disk (for debugging, etc.) OR use one already generated,
                     then provide a path.
                     e.g.:
                     <csr>{xpath%build/paths/ssl/text()}/ca.csr</csr> -->
                <csr/>
                <!-- If you use an index file (or want to) to serialize client certificates, specify it here. -->
                <!-- It must conform to CADB spec (https://pki-tutorial.readthedocs.io/en/latest/cadb.html). -->
                <!-- You should probably also specify a serial file if so. -->
                <!-- Both of these are entirely optional if you aren't using an existing PKI. -->
                <index>{xpath%../../../build/paths/pki/text()}/index.txt</index>
                <serial>{xpath%../../../build/paths/pki/text()}/serial</serial>
                <!-- If you specify a cipher, the key will be encrypted to the passphrase provided by the passphrase attribute.
                     If the key is encrypted (either a pre-existing or a created one) but passphrase is not provided, you will
                     be (securely) prompted for the passphrase to unlock it/add a passphrase to it. -->
                <key cipher="none"
                     passphrase="none"
                     keysize="4096">{xpath%../../../build/paths/pki/text()}/ca.key</key>
                <subject>
                    <commonName>domain.tld</commonName>
                    <countryName>XX</countryName>
                    <localityName>Some City</localityName>
                    <stateOrProvinceName>Some State</stateOrProvinceName>
                    <organization>Some Org, Inc.</organization>
                    <organizationalUnitName>Department Name</organizationalUnitName>
                    <emailAddress>{xpath%../../../../meta/dev/email/text()}</emailAddress>
                </subject>
            </ca>
            <client>
                <cert hash_algo="sha512">{xpath%../../../build/paths/pki/text()}/{xpath%../../../meta/names/uxname/text()}.crt</cert>
                <csr/>
                <key cipher="none"
                     passphrase="none"
                     keysize="4096">{xpath%//build/paths/pki/text()}/{xpath%../../../meta/names/uxname/text()}.key</key>
                <subject>
                    <commonName>website.tld</commonName>
                    <countryName>XX</countryName>
                    <localityName>Some City</localityName>
                    <stateOrProvinceName>Some State</stateOrProvinceName>
                    <organization>Some Org, Inc.</organization>
                    <organizationalUnitName>Department Name</organizationalUnitName>
                    <emailAddress>{xpath%../../../../meta/dev/email/text()}</emailAddress>
                </subject>
            </client>
        </pki>
        <!-- If prompt_passphrase is "false" and passphrase attribute is not given for a gpg element, we will try to use a
             blank passphrase for all operations. -->
        <gpg keyid="none"
             gnupghome="none"
             publish="false"
             prompt_passphrase="false">
            <!-- The below is only used if we are generating a key (i.e. keyid="none"). -->
            <key algo="rsa" keysize="4096" expire="0">
                <name>{xpath%../../../meta/dev/author/text()}</name>
                <email>{xpath%../../../meta/dev/email/text()}</email>
                <comment>for {xpath%../../../meta/names/pname/text()} [autogenerated] | {xpath%../../../meta/uri/text()} | {xpath%../../../meta/desc/text()}</comment>
            </key>
        </gpg>
        <sync>
            <!-- ipxe includes the http directory. or should, anyways. -->
            <ipxe enabled="true">/srv/http/{xpath%../../meta/names/uxname/text()}</ipxe>
            <tftp enabled="true">/tftproot/{xpath%../../meta/names/uxname/text()}</tftp>
            <iso enabled="true">/srv/http/isos/{xpath%../../meta/names/uxname/text()}</iso>
            <gpg enabled="true"
                 format="asc">/srv/http/{xpath%../../meta/names/uxname/text()}/pubkey.asc</gpg>
            <rsync enabled="true">
                <user>root</user>
                <host>mirror.domain.tld</host>
                <port>22</port>
                <pubkey>~/.ssh/id_ed25519</pubkey>
            </rsync>
        </sync>
    </profile>
</bdisk>
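The <rootpass hashed="true"> value in these profiles is a standard sha512-crypt string. A hedged sketch of how a hashed="false" password with hash_algo="sha512" salt="auto" could be turned into that form (this mirrors the semantics the attributes describe, not necessarily BDisk's exact code; the crypt module is Unix-only):

    import crypt

    # Hash a plaintext password the way hashed="false" hash_algo="sha512"
    # salt="auto" describes: generate a salt and produce a $6$... string
    # suitable for /etc/shadow.
    def hash_password(plaintext):
        salt = crypt.mksalt(crypt.METHOD_SHA512)  # salt="auto"
        return crypt.crypt(plaintext, salt)

    print(hash_password('testpassword'))  # e.g. '$6$<salt>$<hash>'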
@ -3,4 +3,4 @@ include::DEV.adoc[]
 include::BOOT.adoc[]
 include::FURTHER.adoc[]
 include::FAQ.adoc[]
-include::FOOT.adoc[]
+//include::FOOT.adoc[]
@ -1,8 +1,5 @@
 = Netboot
-[partintro]
-.What good is software if you can't see it in action?
---
-It's possible to netboot my personal build of BDisk. I mostly keep this up for emergencies in case I need it, but it's good to show you that yes, you can boot a 2GB+ squashed and compressed filesystem from a <50MB image file.
---
+
+It's possible to netboot my personal build of BDisk (or any environment built with BDisk, but this serves as an example for your own setup). I mostly keep this up for emergencies in case I need it, but it's good to show you that yes, you can boot a 2GB+ squashed and compressed filesystem from a <50MB image file.
 
 include::netboot/HOWTO.adoc[]
@ -1,8 +1,5 @@
 = Developer Manual
-[partintro]
-.What good is software if nobody changes it?
---
-BDisk can be sourced for other projects, as it's written in a modular manner. Future versions may support installation as a normal Python module. This will also provide information you may need to change parts of BDisk -- it *is* opensource, after all!
---
+
+BDisk can be sourced for other projects, as it's written in a modular manner. Version 4.x aims to be installable as a standard Python module. This developer manual intends to provide information you may need to change parts of BDisk (or change how it behaves) -- it *is* opensource, after all!
 
 include::dev/FUNCTIONS.adoc[]
@ -1,8 +1,5 @@
 = FAQ
-[partintro]
-.What good is software if nobody understands it?
---
+
 Here you will find some answers to Frequently Asked Questions I've received about this project. Please be sure to check this list before <<FURTHER.adoc#_bug_reports_feature_requests, opening a bug report>> or sending a patch!
---
 
 include::faq/INDEX.adoc[]
@ -1,9 +1,6 @@
 = Further Reading/Resources
-[partintro]
-.What good is software if you can't interact?
---
+
 Here you will find further info, other resources, and such relating to BDisk.
---
 
 include::further/PASSWORDS.adoc[]
 include::further/BUGS.adoc[]
@ -1,6 +1,6 @@
 = BDisk User and Developer Manual
 Brent Saner <bts@square-r00t.net>
-v1.3, 2017-08-20
+v2.0, 2018-05
 :doctype: book
 :data-uri:
 :imagesdir: images
@ -11,7 +11,7 @@ v1.3, 2017-08-20
 :sectnums:
 :toclevels: 2
 // So there's currently a "bug" in that the TOC will display with continued numbering across parts.
-// i essentially want the opposite of https://github.com/asciidoctor/asciidoctor/issues/979 TODO
+// I essentially want the opposite of https://github.com/asciidoctor/asciidoctor/issues/979 TODO
 
 [dedication]
 = Thanks
@ -29,13 +29,16 @@ A lot of research went into how low-level boot operations take place when writin
 === What is BDisk?
 BDisk refers to both a live distribution I use in my own uses (for rescue situations, recovery, etc.) but foremost and most importantly, it also refers to the tool I use for building that distribution. In other words, it's both a complete GNU/Linux distribution you can run entirely from USB/CD/DVD/etc. (without needing to install it to your hard drive)... and also the name of a tool to create a custom GNU/Linux install. The latter is what this project and documentation refer to when the word “BDisk” is used.
 
-This documentation was started when I rewrote BDisk in Python 3.x; versions 0.x-2.x of BDisk were written in Bash, and horribly inelegant and rigid. footnote:[I should take the time to note that I am still quite new to Python so expect there to be plenty of optimizations to be made and general WTF-ery from seasoned Python developers. If you encounter any bugs or improvements, please <<FURTHER.adoc#_bug_reports_feature_requests,report them>>! It'd be much appreciated.] One of my main goals was to make BDisk as easy to use as possible. This is surprisingly hard to do- it’s quite challenging to try to approach software you’ve written with the mindset of someone other than you.
+This documentation was started when I rewrote BDisk in Python 3.x; versions 0.x-2.x of BDisk were written in Bash, and horribly inelegant and rigid. It was a valiant effort, and *mostly* worked. Until it stopped working. To my knowledge, it is (or was) in use by https://ninjaos.org[NinjaOS^] as well as a project for education purposes in Indonesia, though I imagine it's in use other places as well. Ideally it should help those wishing to offer specialized GNU/Linux live media or install CDs.
 
-It’s my hope that by releasing this utility (and documenting it), you can use it and save some time for yourself as well (and hopefully get the chance to learn a bit more in the process!).
+Version 4.x is an entire rewrite to be much more modular and implement a much more flexible structure based on feature requests that have accumulated over time. footnote:[I should take the time to note that I am still quite new to Python so expect there to be plenty of optimizations to be made and general WTF-ery from seasoned Python developers. If you encounter any bugs or improvements, please <<FURTHER.adoc#_bug_reports_feature_requests,report them>>! It'd be much appreciated.]
+
+One of my main goals was to make BDisk as easy to use as possible. This is surprisingly hard to do- it’s quite challenging to try to approach software you’ve written with the mindset of someone other than you.
+
+It’s my hope that by releasing this utility (and documenting it), you can use it and save some time for yourself as well (and hopefully get the chance to learn a bit more in the process!).
 
 It of course is not the <<i_don_t_like_bdisk_are_there_any_other_alternatives,only live media creator>> out there, but most others only focus on remastering an existing ISO, or creating an installer ISO -- not creating a custom live-centric environment.
 
 
 === Copyright/Licensing
 The BDisk code is https://www.gnu.org/licenses/gpl-3.0.en.html[GPLv3-licensed^]. This means that you can use it for business reasons, personal reasons, modify it, etc. Please be sure to familiarize yourself with the full set of terms. You can find the full license in `docs/LICENSE`.
@ -45,4 +48,4 @@ This document, and all other associated author-generated documentation, are rele
 
 image::https://i.creativecommons.org/l/by-sa/4.0/88x31.png[CC-BY-SA_4.0,align="center"]
 
 include::BODY.adoc[]
13	docs/manual/TODO (Normal file)
@ -0,0 +1,13 @@
- dev/{FUNCTIONS.adoc,functions/}
  need to update with new subpackages/functions etc.

- script macOS tool for imaging to USB?

- in faq/LONGTIME.adoc, in ==== Configuring the local mirror and ==== Configuring BDisk, mirrorlist should be part of the archlinux plugin - NOT a distributed hardcoded file. (can we then get rid of <paths><base> entirely?)

- in faq/ISOBIG.adoc and the doc section it references, make sure we reference that the package lists are now in the environment plugin!

- change all references to build.ini to something like "BDisk configuration file"

- reminder: users can specify a local file source for <sources><source> items by using "file:///absolute/path/to/file"
-- todo: add http auth, ftp, ftps
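Regarding the file:// reminder above, a hedged sketch of how such a source URI could be dispatched (the handler split and function name are assumptions; http auth, ftp, and ftps are still TODO per the note):

    import shutil
    from urllib.parse import urlparse
    from urllib.request import urlopen

    def fetch_source(uri, dest):
        # Dispatch a <sources><source> URI: local file:// paths are copied,
        # anything else is fetched over the network.
        parsed = urlparse(uri)
        if parsed.scheme == 'file':
            shutil.copy2(parsed.path, dest)
        else:
            with urlopen(uri) as response, open(dest, 'wb') as f:
                shutil.copyfileobj(response, f)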
@ -1,13 +1,11 @@
 = User Manual
 
-[partintro]
-.What good is software if nobody uses it?
---
-BDisk was ultimately designed to make your life easier. "Why would I possibly need yet another LiveCD/LiveUSB?" Well, that's sort of the point- by customizing a live distribution of GNU/Linux to _your_ particular needs/desires/whimsy, you can do away with the multiple other images you keep around. It's designed to let you create a fully customized distribution.
+BDisk was ultimately designed to make your life easier. "Why would I possibly need yet another LiveCD/LiveUSB?" Well, that's sort of the point- by customizing a live distribution of GNU/Linux to _your_ particular needs/desires/whimsy, you can do away with the multiple other images you keep around. It's designed to let you create a fully customized distribution/live environment.
 
 Using BDisk, you can:
 
-* Install GNU/Linux (https://wiki.archlinux.org/index.php/installation_guide[Arch^], https://watchmysys.com/blog/2015/02/installing-centos-7-with-a-chroot/[CentOS^], https://www.debian.org/releases/stable/amd64/apds03.html.en[Debian^], https://wiki.gentoo.org/wiki/Handbook:AMD64#Installing_Gentoo[Gentoo^], https://help.ubuntu.com/lts/installation-guide/powerpc/apds04.html[Ubuntu^]...). BDisk may be Arch-based, but many if not most other distros offer ways to install from any GNU/Linux live distribution.
+* Install GNU/Linux (https://wiki.archlinux.org/index.php/installation_guide[Arch^], https://watchmysys.com/blog/2015/02/installing-centos-7-with-a-chroot/[CentOS^], https://www.debian.org/releases/stable/amd64/apds03.html.en[Debian^], https://wiki.gentoo.org/wiki/Handbook:AMD64#Installing_Gentoo[Gentoo^], https://help.ubuntu.com/lts/installation-guide/powerpc/apds04.html[Ubuntu^]...). BDisk's flagship and guaranteed guest distro may be Arch-based, but many if not most other distros offer ways to install from any GNU/Linux live distribution. Plus, with the 4.x rewrite, it is possible to add support for any modern GNU/Linux guest distro.
+** This means one could easily create an http://aif.square-r00t.net/[automated Arch install ISO^], or Gentoo installer, etc.
 * Perform disk maintenance (https://raid.wiki.kernel.org/index.php/RAID_setup[mdadm^], fdisk / http://www.rodsbooks.com/gdisk/[gdisk^], http://gparted.org/[gparted^], https://www.thomas-krenn.com/en/wiki/StorCLI[storcli^], etc.). Need to replace that disk in your RAID and you don't have hotswap? Not a problem!
 * Rescue, recover, wipe (http://www.sleuthkit.org/sleuthkit/[scalpel^], http://www.andybev.com/index.php/Nwipe[nwipe^], http://foremost.sourceforge.net/[foremost^], etc.). Chances are this is why you booted a live distro in the first place, yes?
 * Boot over the Internet (or LAN). Burning a new image to CD/DVD/USB is a pain. BDisk has built-in support for http://ipxe.org/[iPXE^] (and traditional PXE setups). Update the filesystem image once, deploy it everywhere.
@ -15,7 +13,6 @@ Using BDisk, you can:
 ** Seriously.
 
 This manual will give you the information you need to build your very own live GNU/Linux distribution.
---
 
 include::user/GETTING_STARTED.adoc[]
 include::user/IMPORTANT_CONCEPTS.adoc[]
@ -23,4 +20,3 @@ include::user/PROJECT_LAYOUT.adoc[]
 include::user/BUILDINI.adoc[]
 include::user/ADVANCED.adoc[]
 include::user/BUILDING.adoc[]
-
@ -1,7 +1,7 @@

== I don't like BDisk. Are there any other alternatives?
First, I'm sorry to hear that BDisk doesn't suit your needs. If there are any features you think are missing, or you encounter any <<FURTHER.adoc#bug_reports_feature_requests, bugs>>, please report them!

But yes; there are plenty of alternatives! I encourage you to search for yourself, but I've tried to be as impartial as I can in the list below.

NOTE: Only *currently maintained projects* are listed here.

@ -64,7 +64,7 @@ Written in Bash.

|======================
|Pros|Cons
|Somewhat customizable|Requires manual initialization of chroot(s) via https://github.com/rpm-software-management/mock/wiki[mock^]
|Uses kickstart configurations|*Requires* a kickstart configuration in order to be useful
|Simple/easy to use|Full featureset unknown; documentation is sparse
||Limited configuration/customization
||
@ -1,3 +1,3 @@

== How do I get the version/build of an ISO?
This can be found in a multitude of places. The full-size ISO file should have the version right in the filename. If you want more detailed information (or perhaps you renamed the file), you can mount the ISO as loopback in GNU/Linux, *BSD, or Mac OS X/macOS and check `/path/to/mounted/iso/VERSION_INFO.txt`. Lastly, within the runtime itself (especially handy if booting via iPXE), you can check `/root/VERSION_INFO.txt` to get information about the build of the currently running live environment.
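For example, on GNU/Linux (the ISO filename here is just a placeholder for whatever you built or downloaded):

```
mkdir -p /mnt/iso
mount -o loop,ro mydistro.iso /mnt/iso   # attach the ISO read-only via loopback
cat /mnt/iso/VERSION_INFO.txt            # the version/build info
umount /mnt/iso
```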
@ -1,10 +1,10 @@

== Why does building take so long?
This typically occurs when you're building from within a LiveCD/LiveUSB environment, in a VM/container/etc., or on a headless server. If this is the case, you may run into what appears to be "stalling", especially while keys are generating for the chroots. Thankfully, there is an easy fix. You can install http://www.issihosts.com/haveged/[haveged^] and run it (this can be done safely while a build is executing). This will show an immediate and non-negligible improvement in the above contexts. If you also have extra processing power to throw at the build process (or are using a dedicated build box), I recommend enabling <<code_its_full_of_stars,`its_full_of_stars`>>. BDisk will then be more aggressive with its resource consumption.
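On an Arch build host, for instance, the fix is two commands (package and service names assumed to match your distro's packaging):

```
pacman -S haveged        # install the entropy-gathering daemon
systemctl start haveged  # safe to start mid-build; key generation should unstick shortly after
```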
=== Running a local mirror
Keep in mind also that the more packages you opt to install, the longer the build process will take. This process will also use quite a fair bit of bandwidth. If you plan on building regular images (e.g. nightly builds) or are testing custom changes, I recommend running a private repository mirror on-site. For Arch-based builds this will not store AUR packages, as those will still be fetched and built (documentation on working around this is TODO), but setting up a local mirror is quite quick and easy. We'll of course use Arch as an example since that's the default guest environment (though I have a https://git.square-r00t.net/OpTools/tree/centos/repoclone[script^] for CentOS as well).

First, you'll need at least 90GB of free disk space. Let's say our repository clone will be at `/srv/repo/arch/`.

You'll also need to find an Arch mirror, ideally one close to you that is up-to-date. The https://www.archlinux.org/mirrorlist/[mirrorlist generator^] and https://www.archlinux.org/mirrors/[mirror list^] will assist you here greatly.
@ -14,81 +14,57 @@ TIP: You can use ANY distro to run a repository mirror, as long as it has _rsync

==== Set up the sync
I have https://git.square-r00t.net/OpTools/tree/arch/repoclone.py[written a script^] that does the heavy lifting! https://git.square-r00t.net/OpTools/plain/arch/repoclone.py[Download it^] and mark it as executable (`chmod +x repoclone.py`). Make sure you read the `--help` output and edit `~/.config/optools/repoclone/arch.ini`.

Assuming you want to run the sync script every 6 hours, this is the cron entry you would use (`crontab -e`):

 0 */6 * * * /path/to/repoclone.py

The first sync can take quite a while, but subsequent runs shouldn't take more than five minutes or so (depending on how many updates are available).
==== Configuring the local mirror
You'll need a way to serve this local mirror in a way pacman can understand. Luckily, it's fairly easy. I recommend using https://www.nginx.com/[nginx^] as it's available by default in many operating systems. You can of course use others such as https://www.lighttpd.net/[lighttpd^], https://httpd.apache.org/[apache/httpd^], etc. For the example configuration here, we're going to use an nginx configuration file.

```
server {
    listen [::]:80;
    access_log /var/log/nginx/repo.access.log main;
    error_log /var/log/nginx/repo.error.log;
    #error_log /var/log/nginx/repo.error.log debug;

    autoindex on;

    root /srv/repo/arch;
}
```

The configuration may vary according to your distribution's provided nginx default configuration, but you'll want this configuration to be served as the default (or set an appropriate https://nginx.org/en/docs/http/server_names.html[`server_name`^] directive, which you would then use in `<profile><build><paths><base>/etc/pacman.d/mirrorlist`).
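For instance, a minimal mirrorlist pointing at the clone might look like this (a sketch -- substitute whatever hostname or address you chose above):

```
## /etc/pacman.d/mirrorlist
Server = http://127.0.0.1/$repo/os/$arch
## keep a real upstream as a fallback:
Server = https://mirror.us.leaseweb.net/archlinux/$repo/os/$arch
```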
==== Configuring BDisk
You'll then want to configure BDisk's chroots to use your local mirror first. However, if you're using a LAN-only mirror, you run into an issue: in the built image, install operations will take longer than they need to, because the local mirror likely won't be available! This is a small issue, as it's unexpected that you'll need to install software within the live environment, but I've run into cases where it was a necessity once or twice.

There is an https://devblog.square-r00t.net/articles/libvirt-spoof-domains-dns-records-redirect-to-another-ip[easy workaround^] if you're using libvirt to build -- you can simply tell your build VM to resolve the FQDN of the "preferred" "real" Internet mirror to the IP address of the box running your local mirror, and set that mirror at the top of `<profile><build><paths><base>/etc/pacman.d/mirrorlist`. However, that's not always feasible -- most notably if you're building on a physical box and it's the same host as the repository clone. In that case, you can set the specific local resolution -- e.g. `http://127.0.0.1/` -- at the top of `<profile><build><paths><base>/etc/pacman.d/mirrorlist` and then set a mirrorlist WITHOUT that entry in `<profile><build><paths><overlay>/etc/pacman.d/mirrorlist`. For more information on using these types of overrides, see <<advanced_customization>>.
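The blog post linked above has the details, but the gist on the libvirt side is a `<dns>` override in the network definition your build VM uses (edit via `virsh net-edit`; the IP here is hypothetical and should be the host running your mirror):

```
<dns>
  <host ip='192.168.122.1'>
    <!-- the "real" mirror FQDN, answered locally instead of via public DNS -->
    <hostname>arch.mirror.square-r00t.net</hostname>
  </host>
</dns>
```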
If you're using the libvirt workaround, remember to configure nginx (or whatever you're using) with a virtual host and location block that matches the "real" upstream mirror. In our example below, we use *http://arch.mirror.square-r00t.net* as the mirror.

```
server {
    listen [::]:80;
    access_log /var/log/nginx/repo.access.log main;
    error_log /var/log/nginx/repo.error.log;
    #error_log /var/log/nginx/repo.error.log debug;

    server_name arch.mirror.square-r00t.net;

    autoindex on;

    root /srv/repo/arch;

    location /archlinux {
        autoindex on;
        rewrite ^/archlinux(/.*)$ /$1;
    }
}
```
@ -1,4 +1,4 @@

== Why Arch Linux as the Recommended Guest?
Because it's a very easy-to-use, simple, https://wiki.archlinux.org/[well-documented^] distro. It's no-frills and incredibly flexible/customizable, can be made rather slim (and is out of the box, in fact), and it's very friendly to run as a chroot inside any other distro or as a chroot host for any other distro.

Plus, they release monthly tarball snapshots that are fairly small and create quick bootstrap environments.
@ -2,17 +2,17 @@

NOTE: It is possible to submit a bug or feature request without registering in my bugtracker. One of my pet peeves is needing to create an account/register on a bugtracker simply to report a bug! The following links only require an email address to file a bug (which is necessary in case I need any further clarification from you or to keep you updated on the status of the bug/feature request -- so please be sure to use a valid email address).

=== Bugs
If you encounter any bugs in *BDisk*, you can file a bug report https://bugs.square-r00t.net/index.php?do=newtask&project=2&task_type=1[here^].

If you encounter any bugs (inaccurate information, typos, misformatting, etc.) in *this documentation*, you can file a bug report https://bugs.square-r00t.net/index.php?do=newtask&project=8&task_type=1[here^].

=== Feature Requests
If you have any features you'd like to see, or you think something would help *BDisk* become even more useful, please file a feature request https://bugs.square-r00t.net/index.php?do=newtask&project=2&task_type=2[here^].

If you have any suggestions on how to improve *this documentation*, or feel it's missing information that could be useful, please file a feature request https://bugs.square-r00t.net/index.php?do=newtask&project=8&task_type=2[here^].

=== Patches
I gladly welcome https://www.gnu.org/software/diffutils/manual/html_node/Unified-Format.html[patches^], but I deplore using GitHub (even though I https://github.com/johnnybubonic/BDisk[have a mirror there^]). For this reason, please follow the same https://www.kernel.org/doc/Documentation/SubmittingPatches[patch/pull request process^] as the Linux kernel and email it to bts@square-r00t.net.

Alternatively, you may attach a patch to a <<bugs,bug report>>/<<feature_requests,feature request>>.
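If you haven't done a kernel-style submission before, it's roughly this (a sketch; the branch name is just illustrative):

```
git checkout -b my-fix
# ...make and commit your change...
git format-patch -1 HEAD
git send-email --to=bts@square-r00t.net 0001-*.patch
```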
Binary file not shown. (Before: 254 KiB; After: 270 KiB)
@ -3,7 +3,7 @@ I update this server with images and iPXE images you can use to netboot my perso

You can https://bdisk.square-r00t.net/download/bdisk-mini.iso[download^] a demo of the iPXE functionality. Note that your computer needs to be connected to a valid Internet connection via ethernet and be able to get a DHCP lease for it to work.

NOTE: Advanced users, you can https://www.gnupg.org/gph/en/manual/x135.html[verify^] it against the GPG signature (https://bdisk.square-r00t.net/download/bdisk-mini.iso.asc[ASC^], https://bdisk.square-r00t.net/download/bdisk-mini.iso.sig[BIN^]). Please see https://square-r00t.net/gpg-info[this page^] for information on fetching my keys and such. Note that while this project is in flux, I may be signing with temporarily-generated throwaway keys.
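In practice, verification is the usual GPG dance, e.g.:

```
curl -sLO https://bdisk.square-r00t.net/download/bdisk-mini.iso
curl -sLO https://bdisk.square-r00t.net/download/bdisk-mini.iso.asc
gpg --verify bdisk-mini.iso.asc bdisk-mini.iso  # requires my key to be imported first (see above)
```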
Once downloaded, you can follow the appropriate steps based on your operating system:
@ -7,8 +7,6 @@ NOTE: Due to requiring various mounting and chrooting, BDisk must be run as the

To initiate a build, simply run `<basedir>/bdisk/bdisk.py`. That's it! Everything should continue automatically.

If you're using a packaged version you installed from your distro's package manager, you should instead run it from wherever it installs to. Most likely this is going to be `/usr/sbin/bdisk`. (On systemd build hosts that have done the https://www.freedesktop.org/wiki/Software/systemd/TheCaseForTheUsrMerge/[/usr merge^], you can use `/usr/sbin/bdisk` or `/sbin/bdisk`.)

If you encounter any issues during the process, make sure you read the documentation -- if your issue still isn't addressed, please be sure to file a <<bug_reports_feature_requests,bug report>>!
@ -3,7 +3,7 @@ This file is where you can specify some of the very basics of BDisk building. It

It's single-level, but divided into "sections". This is unfortunately a limitation of ConfigParser, but it should be easy enough to follow.

Blank lines are ignored, as are any lines beginning with `#` and `;`. There are some restrictions and recommendations for some values, so be sure to note them when they occur. Variables referencing other values in the `build.ini` are allowed in the format of `${value}` if it's in the same section; otherwise, `${section:value}` can be used.
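For example (a contrived sketch using keys from the sample config further down):

```
[bdisk]
uxname = bdisk

[build]
# cross-section reference; resolves to /var/tmp/bdisk
dlpath = /var/tmp/${bdisk:uxname}
# same-section reference; resolves to /var/tmp/bdisk/iso
isodir = ${dlpath}/iso
```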
If you want to use your own `build.ini` file (and you should!), the following paths are searched in order. The first one found will be used.
@ -16,7 +16,6 @@

* `/opt/dev/bdisk/build.ini`
* `/opt/dev/bdisk/extra/build.ini`
* `/opt/dev/bdisk/extra/dist.build.ini`

We'll go into more detail for each section below.
@ -25,7 +24,7 @@

 name = BDISK
 uxname = bdisk
 pname = BDisk
 ver =
 dev = A Developer
 email = dev@domain.tld
 desc = A rescue/restore live environment.

@ -35,29 +34,17 @@

 [user]
 username = ${bdisk:uxname}
 name = Default user
 password = $$6$$t92Uvm1ETLocDb1D$$BvI0Sa6CSXxzIKBinIaJHb1gLJWheoXp7WzdideAJN46aChFu3hKg07QaIJNk4dfIJ2ry3tEfo3FRvstKWasg/
 [build]
 mirror = mirror.us.leaseweb.net
 mirrorproto = https
 mirrorpath = /archlinux/iso/latest/
 mirrorfile =
 mirrorchksum = ${mirrorpath}sha1sums.txt
 mirrorgpgsig =
 gpgkey = 7F2D434B9741E8AC
 gpgkeyserver =
 gpg = no
 dlpath = /var/tmp/${bdisk:uxname}
 chrootdir = /var/tmp/chroots
 basedir = /opt/dev/bdisk

@ -66,8 +53,7 @@

 prepdir = ${dlpath}/temp
 archboot = ${prepdir}/${bdisk:name}
 mountpt = /mnt/${bdisk:uxname}
 multiarch = yes
 ipxe = no
 i_am_a_racecar = no
 [gpg]

@ -85,7 +71,7 @@

 [tftp]
 path = ${build:dlpath}/tftpboot
 user = root
 group = root
 [ipxe]
 iso = no
 uri = https://domain.tld

@ -95,9 +81,9 @@

 ssl_crt = ${ssldir}/main.crt
 ssl_key = ${ssldir}/main.key
 [rsync]
 host =
 user =
 path =
 iso = no
=== `[bdisk]`

@ -128,7 +114,7 @@ This string is used for "pretty-printing" of the project name; it should be a mo

. ASCII only

==== `ver`
The version string. If this isn't specified, we'll try to guess based on the current git commit and tags in `<<code_basedir_code,build:basedir>>` (a quick way to preview what that guess will resemble is sketched below).

. No whitespace
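The guessed value is derived from the repository's git metadata, so plain git gives a comparable string (a sketch -- not necessarily the exact call BDisk makes):

```
cd /opt/dev/bdisk    # your build:basedir
git describe --tags  # e.g. something like 3.10-15-gabcdef1
```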
@ -193,37 +179,20 @@ What comment/description/real name should be used for the user? For more informa

. ASCII only

==== `password`
The escaped, salted, hashed string to use for the non-root user.

Please see <<passwords,the section on passwords>> for information on this value. In the <<example,example above>>, the string `$$6$$t92Uvm1ETLocDb1D$$BvI0Sa6CSXxzIKBinIaJHb1gLJWheoXp7WzdideAJN46aChFu3hKg07QaIJNk4dfIJ2ry3tEfo3FRvstKWasg/` is created from the password `test`. I cannot stress this enough: do not use a plaintext password here, nor just a regular `/etc/shadow` file/`crypt(3)` hash. Read the section. I promise it's short.
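For convenience, here's a small generator in the same spirit as the `hashgen` helper this changeset removes (Python 3; note that literal `$` must be doubled to `$$` for ConfigParser, as in the example above):

```python
#!/usr/bin/env python3
import crypt
import getpass

# Prompt without echoing, then salt/hash with SHA-512 crypt(3).
password = getpass.getpass('Password to hash (will not echo): ')
salthash = crypt.crypt(password, crypt.mksalt(crypt.METHOD_SHA512))
# Double the $ signs before pasting into build.ini:
print(salthash.replace('$', '$$'))
```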
=== `[build]`
This section controls some aspects about the host and things like filesystem paths, etc.

==== `mirror`
A mirror that hosts the bootstrap tarball. It is *highly* recommended you use an Arch Linux https://wiki.archlinux.org/index.php/Install_from_existing_Linux#Method_A:_Using_the_bootstrap_image_.28recommended.29[bootstrap tarball^], as the build process is highly specialized to this (but <<bug_reports_feature_requests,patches/feature requests>> are welcome for other built distros). You can find a list of mirrors at the bottom of Arch's https://www.archlinux.org/download/[download page^].

. No whitespace
. Must be accessible remotely/via a WAN-recognized address
. Must be a domain/FQDN only; no paths (those come later!)

==== `mirrorproto`
What protocol should we use for the <<code_mirror_code,`mirror`>>?
@ -239,42 +208,14 @@ What is the path to the tarball directory on the <<code_mirror_code,`mirror`>>?

. No whitespace

==== `mirrorfile`
What is the filename for the tarball found in the path specified in <<code_mirrorpath_code,`mirrorpath`>>? If left blank, we will use the sha1 <<code_mirrorchksum_code,checksum>> file to try to guess the most recent file.

==== `mirrorchksum`
The path to a sha1 checksum file of the bootstrap tarball.

. No whitespace
. Must be the full path
. Don't include the mirror domain or protocol
==== `mirrorgpgsig`
*[optional]* +

@ -284,7 +225,7 @@ TIP: You may have support for additional hashing algorithms, but these are the o

If the bootstrap tarball file has a GPG signature, we can use it for extra checking. If it's blank, GPG checking will be disabled.

If you specify just `.sig` (or use the default and don't specify a <<code_mirrorfile_code,`mirrorfile`>>), BDisk will try to guess based on the file from the sha1 <<code_mirrorchksum_code,checksum>> file. Note that this must evaluate to a full URL. (e.g. `${mirrorproto}://${mirror}${mirrorpath}somefile.sig`)

==== `gpgkey`
*requires: <<optional,_gpg/gnupg_>>*

@ -304,18 +245,6 @@ What is a valid keyserver we should use to fetch <<code_gpgkey_code,`gpgkey`>>?

. The default (blank) is probably fine. If you don't specify a personal GPG config, then you'll most likely want to leave this blank.
. If set, make sure it is a valid keyserver URI (e.g. `hkp://keys.gnupg.net`)
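As a sanity check, you can make sure the key is fetchable before kicking off a build (key ID from the example config; keyserver per above):

```
gpg --keyserver hkp://keys.gnupg.net --recv-keys 7F2D434B9741E8AC
```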
==== `gpg`
Should we sign our release files? See the <<code_gpg_code_2,`[gpg]`>> section.
@ -525,7 +454,7 @@ What group the HTTP files should be owned as. This is most likely going to be ei

. Group must exist on build system

|======================
^s|Can be one of: ^.^m|group name ^.^m|https://linux.die.net/man/5/group[GID]
|======================

=== `[tftp]`

@ -557,7 +486,7 @@ What group the TFTP files should be owned as. This is most likely going to be ei

. Group must exist on build system

|======================
^s|Can be one of: ^.^m|group name ^.^m|https://linux.die.net/man/5/group[GID]
|======================

=== `[ipxe]`
@ -7,13 +7,13 @@ image::fig1.1.png[cgit,align="center"]

If you know the tag of the commit you want, you can use curl:

 curl -sL https://git.square-r00t.net/BDisk/snapshot/BDisk-4.0.0.tar.xz | tar -xf -

or wget:

 wget -O - https://git.square-r00t.net/BDisk/snapshot/BDisk-4.0.0.tar.xz | tar -xf -

You can use `https://git.square-r00t.net/BDisk/snapshot/BDisk-master.tar.xz` for the URL if you want the latest working version. If you want a snapshot of a specific commit, you can use e.g. `https://git.square-r00t.net/BDisk/snapshot/BDisk-5ac510762ce00eef213957825de0e6d07186e7f8.tar.xz` and so on.

Alternatively, you can use https://git-scm.com/[git^]. Git most definitely _should_ be in your distro's repositories.
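For example (the clone URL here is assumed from the snapshot URLs above):

```
git clone https://git.square-r00t.net/BDisk.git
cd BDisk
```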
@ -40,23 +40,26 @@ NOTE: Some versions may be higher than actually needed (especially _gcc_).

CAUTION: You will need at least about *15GB* of free disk space, depending on what options you enable. Each architecture chroot (i.e. x86_64, i686) is about 3.5GB after a build using the default package set (more on that later), each architecture release tarball (what we use to build the chroots) is approximately 115MB, and each squashed image per architecture is 1.1GB (if you use the default package set). If you don't understand what this means quite yet, don't worry -- we'll go into more detail later on. Just know that you'll need a fair bit of free disk space.

==== Build Environment
* GNU/Linux (a relatively recent release of your preferred distro)
** Building on FreeBSD and other +*BSDs+ *may* be possible via the use of https://www.freebsd.org/doc/handbook/jails.html[jails^]. This is entirely untested, and no support nor testing will be offered by the author (me). If you would like to offer documentation for this, please <<contact_the_author,contact me>>.
** Building on Windows *may* be possible via the use of the https://docs.microsoft.com/en-us/windows/wsl/install-win10[WSL (Windows Subsystem for Linux)^]. This is entirely untested, and no support nor testing will be offered by the author (me). If you would like to offer documentation for this, please <<contact_the_author,contact me>>.
** Building on macOS is simply not supported, period, due to chroots being necessary to the build functionality of BDisk (and macOS not being able to implement GNU/Linux chroots). You'll need to run a build VM.
* https://www.python.org/[Python^] (>=3.6)
==== Necessary
These are needed for using BDisk.

* https://github.com/dosfstools/dosfstools[dosfstools^]
* http://libburnia-project.org[libisoburn^]
* http://squashfs.sourceforge.net[squashfs-tools^] (>=4.2)

These are required Python modules:
// TODO: double-check/update these.

* https://pypi.python.org/pypi/humanize[Humanize^]
* http://jinja.pocoo.org/[Jinja2^]
* https://pypi.python.org/pypi/psutil[PSUtil^]
* https://pypi.python.org/pypi/validators[Validators^]
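If your distro doesn't package some of these, pip in a virtualenv works fine (module names per the PyPI links above):

```
python3 -m venv ~/.venvs/bdisk
. ~/.venvs/bdisk/bin/activate
pip install humanize jinja2 psutil validators
```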
==== Optional

@ -64,8 +67,6 @@ While not strictly necessary, these will greatly enhance your BDisk usage. I've

NOTE: If you do not wish to install any of these, or cannot install them, be sure to disable the relevant options in the `build.ini` file (we'll talk about that later). The default `extra/dist.build.ini` should be sane enough to not require any of these.

* http://gcc.gnu.org[gcc (multilib)^] (>=6.x)
** Needed for building iPXE.
* http://gcc.gnu.org[gcc-libs (multilib)^] (>=6.x)
|
|||||||
* https://rsync.samba.org/[rsync^]
|
* https://rsync.samba.org/[rsync^]
|
||||||
** For syncing built ISOs to a fileserver, syncing to a remote iPXE server, syncing to a traditional PXE/TFTP server, etc.
|
** For syncing built ISOs to a fileserver, syncing to a remote iPXE server, syncing to a traditional PXE/TFTP server, etc.
|
||||||
|
|
||||||
|
These are optional Python modules:
|
||||||
|
|
||||||
|
* https://pypi.python.org/pypi/GitPython[GitPython^]
|
||||||
|
** (Same reasons as _git_)
|
||||||
|
* https://pypi.python.org/pypi/pygpgme[PyGPGME^]
|
||||||
|
** (Same reasons as _gpg/gnupg_)
|
||||||
|
* https://pypi.python.org/pypi/patch[Patch^]
|
||||||
|
** For branding iPXE environments per your `build.ini`.
|
||||||
|
* https://pypi.python.org/pypi/pyOpenSSL[PyOpenSSL^]
|
||||||
|
** To set up a PKI when building iPXE; used to create trusted/verified images.
|
||||||
|
|
||||||
|
@ -17,7 +17,7 @@ First, create a file: `<basedir>/overlay/etc/ssh/sshd_config` using the followin

 Subsystem sftp /usr/lib/ssh/sftp-server
 KexAlgorithms curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256
 Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr
 MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com

We'll also want to implement a more secure `ssh_config` file to avoid possible leaks. The following is `<basedir>/overlay/etc/ssh/ssh_config`:

@ -28,7 +28,7 @@ We'll also want to implement a more secure `ssh_config` file to avoid possible l

 PubkeyAuthentication yes
 HostKeyAlgorithms ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ssh-ed25519,ssh-rsa
 Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr
 MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com

We'll want to create our own moduli. This can take a long time, but only needs to be done once -- it doesn't need to be done for every build. The following commands should be run in `<basedir>/overlay/etc/ssh/`:
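(A sketch of the standard OpenSSH moduli procedure, which is presumably what's meant here -- not necessarily the exact commands the full document gives:)

```
ssh-keygen -G moduli.all -b 4096    # generate candidate DH groups (slow)
ssh-keygen -T moduli -f moduli.all  # screen candidates for safe primes (slower)
rm moduli.all
```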
@ -38,7 +38,7 @@ The `EFI/` directory is similar to <<efi, EFI/>> above also, but needs fewer con

The `patches/` directory largely controls branding of the mini ISO. The patches are in https://www.gnu.org/software/diffutils/manual/html_node/Unified-Format.html[unified diff^] (or "patch") format.

===== overlay/
This directory contains *templated* overlays. These are intended to be templated by the user. See <<overlay, the overlay section>> for more information on how to use this. Remember to suffix your template files with the `.j2` extension.

===== pre-build.d/
This directory contains *templated* overlays. These are intended *not* to be managed by the user, as they handle configuration necessary for building an ISO. See <<pre_build_d, the pre-build.d section>> for more information on this.
@ -1,9 +0,0 @@

<?php
print '#!ipxe

cpuid --ext 29 && set bit_type 64 || set bit_type 32
initrd example.${bit_type}.img
kernel example.${bit_type}.kern initrd=example.${bit_type}.img ip=:::::eth0:dhcp archiso_http_srv=http://domain.tld/path/to/squashes/ archisobasedir=EXAMPLE archisolabel=EXAMPLE checksum=y
boot
'
?>
@ -1 +0,0 @@
NOT A REAL INITRD IMAGE. REPLACE WITH ACTUAL INITRD.
@ -1 +0,0 @@
NOT A REAL KERNEL FILE. REPLACE WITH ACTUAL KERNEL
@ -1 +0,0 @@
NOT A REAL INITRD IMAGE. REPLACE WITH ACTUAL INITRD.
@ -1 +0,0 @@
NOT A REAL KERNEL FILE. REPLACE WITH ACTUAL KERNEL
@ -1 +0,0 @@
c18bde6e20c195bfb0a018b5c13dc420 airootfs.sfs
@ -1 +0,0 @@
NOT A REAL SQUASHED FILESYSTEM FILE. REPLACE WITH ACTUAL SQUASHED FILESYSTEM
@ -1 +0,0 @@
ada655a13f53702b3fe13cae001ab14f741e10c2bb83869048d4c18e74111c12 airootfs.sfs
@ -1 +0,0 @@
c18bde6e20c195bfb0a018b5c13dc420 airootfs.sfs
@ -1 +0,0 @@
NOT A REAL SQUASHED FILESYSTEM FILE. REPLACE WITH ACTUAL SQUASHED FILESYSTEM
@ -1 +0,0 @@
ada655a13f53702b3fe13cae001ab14f741e10c2bb83869048d4c18e74111c12 airootfs.sfs
examples/README (new file, 8 lines)
@ -0,0 +1,8 @@

This directory contains example files/data that you may see referenced in documentation/code.

- mtree.spec
  This file is an example mtree spec sheet that one may use for an overlay. It was generated by the command "mtree -c -K all -p /home/bts".
  If you're on Arch, a port of mtree can be found in the AUR under the package name "nmtree" (it's maintained by the same author as BDisk!).
  If you're on Debian or Ubuntu (or forks thereof), you can find it in the "freebsd-buildutils" package. (The executable is called "fmtree".)
  If you're on Gentoo, it's in sys-apps/mtree.
  If you're on RHEL/CentOS, the "extras" repository has gomtree, which (although written in Go) should be able to produce mtree spec files (but this is unknown for certain).
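For the reverse direction (checking a tree against a recorded spec), the classic invocation is a sketch like:

```
mtree -f mtree.spec -p /home/bts  # reports any deviations from the spec
```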
examples/mtree.spec (new file, 1191 lines)
File diff suppressed because it is too large.

external/apacman-current.pkg.tar.xz (new, vendored)
Binary file not shown.

external/aurman-current.pkg.tar.xz (new, vendored)
Binary file not shown.

extra/aif.png (deleted)
Binary file not shown. (Before: 152 B)

extra/bdisk.png (deleted)
Binary file not shown. (Before: 1.1 MiB)

Binary file not shown.
@ -1,8 +0,0 @@

#!/bin/env python3
import crypt
import getpass

password = getpass.getpass("\nWhat password would you like to hash/salt?\n(NOTE: will NOT echo back!)\n")
salt = crypt.mksalt(crypt.METHOD_SHA512)
salthash = crypt.crypt(password, salt)
print("\nYour salted hash is:\n\t{0}\n".format(salthash))
@ -1,103 +0,0 @@
###########################################################
##               BUILD.CONF SAMPLE FILE                  ##
###########################################################
#
# This file is used to define various variables/settings
# used by the build script.
#
# For full (perhaps overly-verbose ;) documentation, please
# see:
# https://bdisk.square-r00t.net/#_the_code_build_ini_code_file
# Or simply refer to the section titled "The build.ini File"
# in the user manual.

[bdisk]
name = BDISK
uxname = bdisk
pname = BDisk
ver =
dev = r00t^2
email = bts@square-r00t.net
desc = j00 got 0wnz0r3d lulz.
uri = https://bdisk.square-r00t.net
root_password =
user = yes

[user]
username = ${bdisk:uxname}
name = Default user
groups = ${bdisk:uxname},admin
password =

[source_x86_64]
mirror = mirror.us.leaseweb.net
mirrorproto = https
mirrorpath = /archlinux/iso/latest/
mirrorfile =
mirrorchksum = ${mirrorpath}sha1sums.txt
chksumtype = sha1
mirrorgpgsig =
gpgkey = 7F2D434B9741E8AC
gpgkeyserver =

[source_i686]
mirror = mirror.us.leaseweb.net
mirrorproto = https
mirrorpath = /archlinux/iso/latest/
mirrorfile =
mirrorchksum = ${mirrorpath}sha1sums.txt
chksumtype = sha1
mirrorgpgsig =
gpgkey = 7F2D434B9741E8AC
gpgkeyserver =

[build]
gpg = no
dlpath = /var/tmp/${bdisk:uxname}
chrootdir = /var/tmp/chroots
basedir = /opt/dev/bdisk
isodir = ${dlpath}/iso
srcdir = ${dlpath}/src
prepdir = ${dlpath}/temp
archboot = ${prepdir}/${bdisk:name}
mountpt = /mnt/${bdisk:uxname}
#multiarch = yes
multiarch = x86_64
sign = yes
ipxe = no
i_am_a_racecar = yes

[gpg]
mygpgkey =
mygpghome = ${build:dlpath}/.gnupg

[sync]
http = no
tftp = no
git = no
rsync = no

[http]
path = ${build:dlpath}/http
user = http
group = http

[tftp]
path = ${build:dlpath}/tftpboot
user = root
group = root

[ipxe]
iso = no
uri = https://bdisk.square-r00t.net
ssldir = ${build:dlpath}/ssl
ssl_ca = ${ssldir}/ca.crt
ssl_cakey = ${ssldir}/ca.key
ssl_crt = ${ssldir}/main.crt
ssl_key = ${ssldir}/main.key

[rsync]
host =
user =
path =
iso = yes
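The ${section:option} references in this sample follow Python configparser's ExtendedInterpolation syntax; a minimal sketch of reading the file that way (not BDisk's actual loader, and the filename is assumed):

import configparser

# Sketch: ExtendedInterpolation resolves ${bdisk:uxname}-style
# cross-section references, and ${prepdir}-style same-section ones.
cfg = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
cfg.read('build.ini')
print(cfg['user']['username'])   # "bdisk", via ${bdisk:uxname}
print(cfg['build']['archboot'])  # "/var/tmp/bdisk/temp/BDISK"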
3
extra/external/SMC_DumpKey/README
vendored
@ -1,3 +0,0 @@
Taken, with thanks, from http://www.contrib.andrew.cmu.edu/~somlo/OSXKVM/

Needs to be compiled on Linux with gcc, and only runs on genuine Apple hardware (as it polls the SMC chip for the given value).
193
extra/external/SMC_DumpKey/SmcDumpKey.c
vendored
@ -1,193 +0,0 @@
/*
 * prints out 4-character name of the SMC key at given index position;
 *
 * by Gabriel L. Somlo <somlo@cmu.edu>, Summer 2014
 *
 * Compile with: gcc -O2 -o SmcDumpKey SmcDumpKey.c -Wall
 *
 * You probably want to "modprobe -r applesmc" before running this...
 *
 * Code bits and pieces shamelessly ripped from the linux kernel driver
 * (drivers/hwmon/applesmc.c by N. Boichat and H. Rydberg)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License v2 as published by the
 * Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <limits.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <sys/io.h>
#include <linux/byteorder/little_endian.h>


#define APPLESMC_START 0x300
#define APPLESMC_RANGE 0x20

#define APPLESMC_DATA_PORT (APPLESMC_START + 0x00)
#define APPLESMC_CMD_PORT (APPLESMC_START + 0x04)

#define APPLESMC_READ_CMD 0x10
#define APPLESMC_GET_KEY_BY_INDEX_CMD 0x12
#define APPLESMC_GET_KEY_TYPE_CMD 0x13


/* wait up to 128 ms for a status change. */
#define APPLESMC_MIN_WAIT 0x0010
#define APPLESMC_RETRY_WAIT 0x0100
#define APPLESMC_MAX_WAIT 0x20000


#define APPLESMC_KEY_NAME_LEN 4
#define APPLESMC_KEY_TYPE_LEN 4

typedef struct key_type {
	uint8_t data_len;
	uint8_t data_type[APPLESMC_KEY_TYPE_LEN];
	uint8_t flags;
} __attribute__((packed)) key_type;


/* wait_read - Wait for a byte to appear on SMC port. */
static int
wait_read(void)
{
	uint8_t status;
	int us;

	for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
		usleep(us);
		status = inb(APPLESMC_CMD_PORT);
		/* read: wait for smc to settle */
		if (status & 0x01)
			return 0;
	}

	fprintf(stderr, "wait_read() fail: 0x%02x\n", status);
	return -1;
}

/* send_byte - Write to SMC port, retrying when necessary. */
static int
send_byte(uint8_t cmd, unsigned short port)
{
	uint8_t status;
	int us;

	outb(cmd, port);
	for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
		usleep(us);
		status = inb(APPLESMC_CMD_PORT);
		/* write: wait for smc to settle */
		if (status & 0x02)
			continue;
		/* ready: cmd accepted, return */
		if (status & 0x04)
			return 0;
		/* timeout: give up */
		if (us << 1 == APPLESMC_MAX_WAIT)
			break;
		/* busy: long wait and resend */
		usleep(APPLESMC_RETRY_WAIT);
		outb(cmd, port);
	}

	fprintf(stderr,
		"send_byte(0x%02x, 0x%04x) fail: 0x%02x\n", cmd, port, status);
	return -1;
}

static int
send_argument(const uint8_t *key)
{
	int i;

	for (i = 0; i < APPLESMC_KEY_NAME_LEN; i++)
		if (send_byte(key[i], APPLESMC_DATA_PORT))
			return -1;
	return 0;
}

static int
read_smc(uint8_t cmd, const uint8_t *key, uint8_t *buf, uint8_t len)
{
	int i;

	if (send_byte(cmd, APPLESMC_CMD_PORT) || send_argument(key)) {
		fprintf(stderr, "%.4s: read arg fail\n", key);
		return -1;
	}

	if (send_byte(len, APPLESMC_DATA_PORT)) {
		fprintf(stderr, "%.4s: read len fail\n", key);
		return -1;
	}

	for (i = 0; i < len; i++) {
		if (wait_read()) {
			fprintf(stderr, "%.4s: read data[%d] fail\n", key, i);
			return -1;
		}
		buf[i] = inb(APPLESMC_DATA_PORT);
	}

	return 0;
}


int
main(int argc, char **argv)
{
	key_type kt;
	uint8_t data_buf[UCHAR_MAX];
	uint8_t i;

	if (argc != 2 || strlen(argv[1]) != APPLESMC_KEY_NAME_LEN) {
		fprintf(stderr, "\nUsage: %s <4-char-key-name>\n\n", argv[0]);
		return -1;
	}

	if (ioperm(APPLESMC_START, APPLESMC_RANGE, 1) != 0) {
		perror("ioperm failed");
		return -2;
	}

	if (read_smc(APPLESMC_GET_KEY_TYPE_CMD,
		     (uint8_t *)argv[1], (uint8_t *)&kt, sizeof(kt)) != 0) {
		fprintf(stderr, "\nread_smc get_key_type error\n\n");
		return -3;
	}
	printf(" type=\"");
	for (i = 0; i < APPLESMC_KEY_TYPE_LEN; i++)
		printf(isprint(kt.data_type[i]) ? "%c" : "\\x%02x",
		       (uint8_t)kt.data_type[i]);
	printf("\" length=%d flags=%x\n", kt.data_len, kt.flags);

	if (read_smc(APPLESMC_READ_CMD,
		     (uint8_t *)argv[1], data_buf, kt.data_len) != 0) {
		fprintf(stderr, "\nread_smc get_key_data error\n\n");
		return -4;
	}
	printf(" data=\"");
	for (i = 0; i < kt.data_len; i++)
		printf(isprint(data_buf[i]) ? "%c" : "\\x%02x",
		       (uint8_t)data_buf[i]);
	printf("\"\n");

	return 0;
}
@ -1,48 +0,0 @@
# The modules found in here are for distro-specific differences in the builds.
# For instance, if you want to build a Debian-based BDisk, you'd specify pkg['install'] = ['apt-get', '-y', 'install', '%PKG%'],
# name this file as "debian.py", and set bdisk:distro as 'debian'.
# Note that the guest will need python installed. If distro is set as "NOCHECK", a distro check of the tarball won't be performed
# (as the distro check requires python be installed first).

# Special variables to be used in strings:
# %PKG% = the name of a package would be inserted here.

# This template uses Debian as an example.

# The name of the distro. Must match the output from platform.linux_distribution()[0].lower()
# Regex is supported.
distro = 'debian'

# The path to python. Can be either python 2.x (2.6 or higher) or 3.x.
pybin = '/usr/bin/python'

guestenv = {}
# The following environment variables will be set for the guest.
guestenv['DEBIAN_FRONTEND'] = 'noninteractive'

script = {}
# This variable can be used to perform some additional system tweaks and such. This is run before package installation.
# It must be formatted as a complete script, i.e. include a shebang etc.
script['pre'] = """#!/bin/bash
touch /root/BDISK
"""

# This variable can be used to perform some additional system tweaks and such. This is run after package installation.
script['post'] = """#!/bin/bash
rm -f /root/BDISK
"""

pkg = {}
# The command, with arguments, in list format that should be run before we install software in the guest.
# For instance, if your guest distro requires a local package listing cache (nearly all of them do) to be
# updated first, this is where it would be run.
pkg['pre'] = ['apt-get', '-y', 'update']

# The command, with arguments, in list format to install a package.
# Note that the command must be constructed in a way that does not require user interaction.
pkg['install'] = ['apt-get', '-y', 'install', '%PKG%']

# The command, with arguments, in list format to use to check if a package is installed.
# It should return 0 as its exit status if the package is installed. Any other exit status assumes the package is not installed.
pkg['check'] = ['dpkg-query', '-f', '${binary:Package}\n', '-W', '%PKG%']
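A minimal sketch of how the %PKG% placeholder could be expanded into a runnable command (the helper function and the use of subprocess are illustrative assumptions, not BDisk's actual code):

import subprocess

# Substitute %PKG% in a command template such as pkg['install'], then run it.
def run_pkg_cmd(template, package, env=None):
    cmd = [arg.replace('%PKG%', package) for arg in template]
    return subprocess.run(cmd, env=env).returncode

# run_pkg_cmd(pkg['install'], 'vim', env=guestenv)
#   would run: apt-get -y install vim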
@ -1,11 +0,0 @@
distro = 'arch'
pybin = '/usr/bin/python'
script = {}
script['pre'] = """#!/bin/bash
touch /root/BDISK
"""
script['post'] = """#!/bin/bash
rm -f /root/BDISK
"""
pkg = {}
pkg['pre'] = ['pacman', '-Syyy']
pkg['install'] = ['apacman', '-S', '%PKG%']
pkg['check'] = ['pacman', '-Q', '%PKG%']
@ -1,11 +0,0 @@
distro = 'centos linux'
pybin = '/usr/bin/python'
script = {}
script['pre'] = """#!/bin/bash
touch /root/BDISK
"""
script['post'] = """#!/bin/bash
rm -f /root/BDISK
"""
pkg = {}
pkg['pre'] = ['yum', 'makecache']
pkg['install'] = ['yum', '-y', 'install', '%PKG%']
pkg['check'] = ['rpm', '-qi', '%PKG%']
@ -1,12 +0,0 @@
distro = 'debian'
pybin = '/usr/bin/python'
guestenv = {}
guestenv['DEBIAN_FRONTEND'] = 'noninteractive'
script = {}
script['pre'] = """#!/bin/bash
touch /root/BDISK
"""
script['post'] = """#!/bin/bash
rm -f /root/BDISK
"""
pkg = {}
pkg['pre'] = ['apt-get', '-q', '-y', 'update']
pkg['install'] = ['apt-get', '-q', '-y', '-o', 'Dpkg::Options::=--force-confdef', '-o', 'Dpkg::Options::=--force-confold', 'install', '%PKG%']
pkg['check'] = ['dpkg-query', '-f', '${binary:Package}\n', '-W', '%PKG%']
@ -1,11 +0,0 @@
distro = 'fedora'
pybin = '/usr/bin/python3'
script = {}
script['pre'] = """#!/bin/bash
touch /root/BDISK
"""
script['post'] = """#!/bin/bash
rm -f /root/BDISK
"""
pkg = {}
pkg['pre'] = ['yum', 'makecache']
pkg['install'] = ['yum', '-y', 'install', '%PKG%']
pkg['check'] = ['rpm', '-qi', '%PKG%']
@ -1,11 +0,0 @@
distro = 'red hat enterprise linux (server|desktop)'
pybin = '/usr/bin/python'
script = {}
script['pre'] = """#!/bin/bash
touch /root/BDISK
"""
script['post'] = """#!/bin/bash
rm -f /root/BDISK
"""
pkg = {}
pkg['pre'] = ['yum', 'makecache']
pkg['install'] = ['yum', '-y', 'install', '%PKG%']
pkg['check'] = ['rpm', '-qi', '%PKG%']
@ -1,11 +0,0 @@
distro = 'suse linux enterprise server'
pybin = '/usr/bin/python'
script = {}
script['pre'] = """#!/bin/bash
touch /root/BDISK
"""
script['post'] = """#!/bin/bash
rm -f /root/BDISK
"""
pkg = {}
pkg['pre'] = ['zypper', 'refresh']
pkg['install'] = ['zypper', 'install', '--no-confirm', '-l', '%PKG%']
pkg['check'] = ['rpm', '-qi', '%PKG%']
@ -1,12 +0,0 @@
distro = 'ubuntu'
pybin = '/usr/bin/python'
guestenv = {}
guestenv['DEBIAN_FRONTEND'] = 'noninteractive'
script = {}
script['pre'] = """#!/bin/bash
touch /root/BDISK
"""
script['post'] = """#!/bin/bash
rm -f /root/BDISK
"""
pkg = {}
pkg['pre'] = ['apt-get', '-q', '-y', 'update']
pkg['install'] = ['apt-get', '-q', '-y', '-o', 'Dpkg::Options::=--force-confdef', '-o', 'Dpkg::Options::=--force-confold', 'install', '%PKG%']
pkg['check'] = ['dpkg-query', '-f', '${binary:Package}\n', '-W', '%PKG%']
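Since each of these per-distro files is a plain Python module defining distro, pybin, script, and pkg at top level, one plausible way to load them is by path. This is an assumption about the mechanism, not code from this repo, and the directory name is hypothetical:

import importlib.util

# Assumption, not repo code: load e.g. 'debian' from a hypothetical
# bdisk/distros/ directory and return its top-level settings.
def load_distro_module(name, dirpath='bdisk/distros'):
    spec = importlib.util.spec_from_file_location(name, '{0}/{1}.py'.format(dirpath, name))
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    return mod.distro, mod.pybin, mod.script, mod.pkg

# distro, pybin, script, pkg = load_distro_module('debian')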
Some files were not shown because too many files have changed in this diff.