stubbing out 4.x

2018-05-08 22:26:17 -04:00
parent aaf03db8bd
commit c6a837d1fe
167 changed files with 30 additions and 8034 deletions

bdisk/BIOS.py (new, empty)
bdisk/GPG.py (new, empty)
bdisk/SSL.py (new, empty)
bdisk/UEFI.py (new, empty)
bdisk/__init__.py (new, empty)

bdisk/bGPG.py (deleted)

@@ -1,228 +0,0 @@
import os
from io import BytesIO
import subprocess
import datetime
import jinja2
import gpgme
import psutil
def genGPG(conf):
# https://media.readthedocs.org/pdf/pygpgme/latest/pygpgme.pdf
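# Flow: find (or generate) our signing key, add any configured keyservers to
# dirmngr.conf, import and locally sign the distro key(s) so the sources can be
# verified, and export our public key so the chroots can verify what we sign.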
build = conf['build']
dlpath = build['dlpath']
bdisk = conf['bdisk']
gpghome = conf['gpg']['mygpghome']
distkeys = []
gpgkeyserver = []
for a in conf['build']['arch']:
keysrv = conf['src'][a]['gpgkeyserver']
distkey = conf['src'][a]['gpgkey']
if keysrv and (keysrv not in gpgkeyserver):
gpgkeyserver.append(keysrv)
if distkey and (distkey not in distkeys):
distkeys.append(distkey)
templates_dir = '{0}/extra/templates'.format(build['basedir'])
mykey = False
pkeys = []
killStaleAgent(conf)
if conf['gpg']['mygpgkey'] != '':
mykey = conf['gpg']['mygpgkey']
if gpghome == '':
# Let's try the default.
gpghome = '{0}/.gnupg'.format(os.path.expanduser("~"))
else:
# No key ID was specified.
if gpghome == '':
# We'll generate a key if we can't find one here.
gpghome = build['dlpath'] + '/.gnupg'
killStaleAgent(conf)
os.environ['GNUPGHOME'] = gpghome
gpg = gpgme.Context()
# do we need to add a keyserver?
if len(gpgkeyserver) != 0:
dirmgr = '{0}/dirmngr.conf'.format(gpghome)
for s in gpgkeyserver:
if os.path.isfile(dirmgr):
with open(dirmgr, 'r+') as f:
findme = any(s in line for line in f)
if not findme:
f.seek(0, os.SEEK_END)
f.write("\n# Added by {0}.\nkeyserver {1}\n".format(
bdisk['pname'],
s))
if mykey:
try:
pkeys.append(gpg.get_key(mykey, True))
except gpgme.GpgmeError:
exit('{0}: ERROR: You specified using {1} but we have no secret key for that ID!'.format(
datetime.datetime.now(),
mykey))
else:
for key in gpg.keylist(None, True):
if key.can_sign:
pkeys.append(key)
break
if len(pkeys) == 0:
print("{0}: [GPG] Generating a GPG key...".format(datetime.datetime.now()))
loader = jinja2.FileSystemLoader(templates_dir)
env = jinja2.Environment(loader = loader)
tpl = env.get_template('GPG.j2')
tpl_out = tpl.render(build = build, bdisk = bdisk)
privkey = gpg.get_key(gpg.genkey(tpl_out).fpr, True)
pkeys.append(privkey)
# do we need to add a keyserver? this is for the freshly-generated GNUPGHOME
if len(gpgkeyserver) != 0:
dirmgr = '{0}/dirmngr.conf'.format(gpghome)
for s in gpgkeyserver:
# 'a+' so the file is created if it doesn't exist yet (a fresh GNUPGHOME won't have one).
with open(dirmgr, 'a+') as f:
f.seek(0)
findme = any(s in line for line in f)
if not findme:
f.seek(0, os.SEEK_END)
f.write("\n# Added by {0}.\nkeyserver {1}\n".format(
bdisk['pname'],
s))
gpg.signers = pkeys
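# From here on, any sign() call on this context uses the key(s) above by default.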
# Now we try to find and add the key for the base image.
gpg.keylist_mode = gpgme.KEYLIST_MODE_EXTERN # remote (keyserver)
if len(distkeys) > 0: # testing
for k in distkeys:
key = gpg.get_key(k)
importkey = key.subkeys[0].fpr
gpg.keylist_mode = gpgme.KEYLIST_MODE_LOCAL # local keyring (default)
DEVNULL = open(os.devnull, 'w')
print('{0}: [GPG] Importing {1} and signing it for verification purposes...'.format(
datetime.datetime.now(),
k))
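# We shell out to the gpg binary for the keyserver fetch and local signing;
# pygpgme doesn't appear to expose recv-keys/lsign-key operations.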
cmd = ['/usr/bin/gpg',
'--recv-keys',
'--batch',
'--yes',
'0x{0}'.format(importkey)]
subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
sigkeys = []
for i in gpg.get_key(importkey).subkeys:
sigkeys.append(i.fpr)
cmd = ['/usr/bin/gpg',
'--batch',
'--yes',
'--lsign-key',
'0x{0}'.format(importkey)]
subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
# We need to expose this key to the chroots, too, so we need to export it.
with open('{0}/gpgkey.pub'.format(dlpath), 'wb') as f:
gpg.export(pkeys[0].subkeys[0].keyid, f)
return(gpg)
def killStaleAgent(conf):
# Kill off any stale GPG agents running.
# Probably not even needed, but good to have.
chrootdir = conf['build']['chrootdir']
gpgpath = conf['gpg']['mygpghome']
procs = psutil.process_iter()
plst = []
for p in procs:
if (p.name() in ('gpg-agent', 'dirmngr') and p.uids()[0] == os.getuid()):
pd = psutil.Process(p.pid).as_dict()
for d in (chrootdir, gpgpath):
if pd['cwd'] and pd['cwd'].startswith(d):
plst.append(p.pid)
if len(plst) >= 1:
for p in plst:
psutil.Process(p).terminate()
def signIMG(path, conf):
if conf['build']['sign']:
# Do we want to kill off any stale gpg-agents? (So we spawn a new one)
# Requires further testing.
#killStaleAgent()
gpg = conf['gpgobj']
print('{0}: [GPG] Signing {1}...'.format(
datetime.datetime.now(),
path))
# May not be necessary; further testing needed.
#if os.getenv('GPG_AGENT_INFO'):
# del os.environ['GPG_AGENT_INFO']
# ASCII-armor (.asc)
gpg.armor = True
data_in = open(path, 'rb')
sigbuf = BytesIO()
sig = gpg.sign(data_in, sigbuf, gpgme.SIG_MODE_DETACH)
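# SIG_MODE_DETACH writes only the signature into sigbuf; the signed file itself is untouched.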
_ = sigbuf.seek(0)
_ = data_in.seek(0)
data_in.close()
with open('{0}.asc'.format(path), 'wb') as f:
f.write(sigbuf.read())
print('{0}: [GPG] Wrote {1}.asc (ASCII-armored signature).'.format(
datetime.datetime.now(),
path))
# Binary signature (.sig)
gpg.armor = False
data_in = open(path, 'rb')
sigbuf = BytesIO()
sig = gpg.sign(data_in, sigbuf, gpgme.SIG_MODE_DETACH)
_ = sigbuf.seek(0)
_ = data_in.seek(0)
data_in.close()
with open('{0}.sig'.format(path), 'wb') as f:
f.write(sigbuf.read())
print('{0}: [GPG] Wrote {1}.sig (binary signature).'.format(
datetime.datetime.now(),
path))
def gpgVerify(sigfile, datafile, conf):
gpg = conf['gpgobj']
fullkeys = []
print('{0}: [GPG] Verifying {1} with {2}...'.format(
datetime.datetime.now(),
datafile,
sigfile))
keylst = gpg.keylist()
for k in keylst:
fullkeys.append(k.subkeys[0].fpr)
with open(sigfile,'rb') as s:
with open(datafile, 'rb') as f:
sig = gpg.verify(s, f, None)
for x in sig:
if x.validity <= 1:
if not x.validity_reason:
reason = 'we require a signature trust of 2 or higher'
else:
reason = x.validity_reason
print('{0}: [GPG] Key {1} failed to verify: {2}'.format(
datetime.datetime.now(),
x.fpr,
reason))
verified = False
skeys = []
for k in sig:
skeys.append(k.fpr)
if k.fpr in fullkeys:
verified = True
break
if verified:
print('{0}: [GPG] {1} verified (success).'.format(
datetime.datetime.now(),
datafile))
else:
print('{0}: [GPG] {1} failed verification!'.format(
datetime.datetime.now(),
datafile))
return(verified)
def delTempKeys(conf):
# Create a config option to delete these.
# It's handy to keep these keys, but I'd understand if
# people didn't want to use them.
gpg = conf['gpgobj']
if conf['gpg']:
keys = []
if conf['gpg']['mygpgkey'] != '':
keys.append(gpg.get_key(conf['gpg']['mygpgkey']))
if conf['gpg']['mygpghome'] == '':
keys.append(gpg.get_key(None, True)) # this is safe; we generated our own
for k in keys:
gpg.delete(k)
killStaleAgent(conf)

bdisk/bSSL.py (deleted)

@@ -1,196 +0,0 @@
import OpenSSL
import os
import shutil
import datetime
import re
def verifyCert(cert, key, CA = None):
# Verify a given certificate against a certificate.
# Optionally verify against a CA certificate as well (Hopefully. If/when PyOpenSSL ever supports it.)
chk = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
chk.use_privatekey(key)
chk.use_certificate(cert)
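# NOTE: check_privatekey() only confirms the private key pairs with the certificate's
# public key; it does not validate a trust chain.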
try:
chk.check_privatekey()
except OpenSSL.SSL.Error:
print("{0}: ERROR: {1} does not match {2}!".format(datetime.datetime.now(), key, cert))
return(False)
else:
print("{0}: [SSL] Verified {1} against {2} successfully.".format(datetime.datetime.now(), key, cert))
return(True)
# This is disabled because there doesn't seem to currently be any way
# to actually verify certificates against a given CA.
#if CA:
# try:
# magic stuff here
def sslCAKey(conf):
# TODO: use path from conf, even if it doesn't exist?
# if it does, read it into a pkey object
keyfile = conf['ipxe']['ssl_cakey']
if os.path.isfile(keyfile):
try:
key = OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM,
open(keyfile).read())
except OpenSSL.crypto.Error:
exit('{0}: ERROR: It seems that {1} is not a proper PEM-encoded SSL key.'.format(
datetime.datetime.now(),
keyfile))
else:
key = OpenSSL.crypto.PKey()
print("{0}: [SSL] Generating SSL CA key...".format(datetime.datetime.now()))
key.generate_key(OpenSSL.crypto.TYPE_RSA, 4096)
with open(keyfile, 'wb') as f:
f.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key))
return(key)
def sslCA(conf, key = None):
# NOTE: 'key' is a pkey OBJECT, not a file.
keyfile = conf['ipxe']['ssl_cakey']
crtfile = conf['ipxe']['ssl_ca']
if not key:
if os.path.isfile(keyfile):
try:
key = OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM,
open(keyfile).read())
except OpenSSL.crypto.Error:
exit('{0}: ERROR: It seems that {1} is not a proper PEM-encoded SSL key.'.format(
datetime.datetime.now(),
keyfile))
else:
exit('{0}: ERROR: We need a key to generate a CA certificate!'.format(
datetime.datetime.now()))
if os.path.isfile(crtfile):
try:
ca = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM,
open(crtfile).read())
except OpenSSL.crypto.Error:
exit('{0}: ERROR: It seems that {1} is not a proper PEM-encoded SSL certificate.'.format(
datetime.datetime.now(),
crtfile))
else:
domain = (re.sub(r'^(https?|ftp)://([a-z0-9.-]+)/?.*$', r'\g<2>',
conf['ipxe']['uri'],
flags=re.IGNORECASE)).lower()
# http://www.pyopenssl.org/en/stable/api/crypto.html#pkey-objects
# http://docs.ganeti.org/ganeti/2.14/html/design-x509-ca.html
ca = OpenSSL.crypto.X509()
ca.set_version(2) # N.B. zero-indexed; 2 == an X509v3 certificate
ca.set_serial_number(1)
#ca.get_subject().CN = domain
ca.get_subject().CN = '{0} CA'.format(conf['bdisk']['name'])
ca.gmtime_adj_notBefore(0)
# valid for ROUGHLY 10 years: years(ish) * days * hours * mins * secs.
# The parameter is in seconds, which is why we multiply them all together.
ca.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
ca.set_issuer(ca.get_subject())
ca.set_pubkey(key)
ca.add_extensions([
OpenSSL.crypto.X509Extension(b"basicConstraints",
True,
b"CA:TRUE, pathlen:0"),
OpenSSL.crypto.X509Extension(b"keyUsage",
True,
b"keyCertSign, cRLSign"),
OpenSSL.crypto.X509Extension(b"subjectKeyIdentifier",
False,
b"hash",
subject = ca),])
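# basicConstraints pathlen:0 restricts this CA to signing end-entity (leaf) certs only,
# and keyUsage limits it to certificate/CRL signing.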
ca.sign(key, "sha512")
with open(crtfile, 'wb') as f:
f.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, ca))
return(ca)
def sslCKey(conf):
keyfile = conf['ipxe']['ssl_key']
if os.path.isfile(keyfile):
try:
key = OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM,
open(keyfile).read())
except OpenSSL.crypto.Error:
exit('{0}: ERROR: It seems that {1} is not a proper PEM-encoded SSL key.'.format(
datetime.datetime.now(),
keyfile))
else:
key = OpenSSL.crypto.PKey()
print("{0}: [SSL] Generating SSL Client key...".format(datetime.datetime.now()))
key.generate_key(OpenSSL.crypto.TYPE_RSA, 4096)
with open(keyfile, 'wb') as f:
f.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key))
return(key)
def sslCSR(conf, key = None):
# NOTE: 'key' is a pkey OBJECT, not a file.
keyfile = conf['ipxe']['ssl_key']
crtfile = conf['ipxe']['ssl_crt']
if not key:
if os.path.isfile(keyfile):
try:
key = OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM,
open(keyfile).read())
except OpenSSL.crypto.Error:
exit('{0}: ERROR: It seems that {1} is not a proper PEM-encoded SSL key.'.format(
datetime.datetime.now(),
keyfile))
else:
exit('{0}: ERROR: We need a key to generate a CSR!'.format(
datetime.datetime.now()))
domain = (re.sub(r'^(https?|ftp)://([a-z0-9.-]+)/?.*$', r'\g<2>',
conf['ipxe']['uri'],
flags=re.IGNORECASE)).lower()
csr = OpenSSL.crypto.X509Req()
csr.get_subject().CN = domain
#req.get_subject().countryName = 'xxx'
#req.get_subject().stateOrProvinceName = 'xxx'
#req.get_subject().localityName = 'xxx'
#req.get_subject().organizationName = 'xxx'
#req.get_subject().organizationalUnitName = 'xxx'
csr.set_pubkey(key)
csr.sign(key, "sha512")
with open('/tmp/main.csr', 'wb') as f:
f.write(OpenSSL.crypto.dump_certificate_request(OpenSSL.crypto.FILETYPE_PEM, csr))
return(csr)
def sslSign(conf, ca, key, csr):
# NOTE: 'ca', 'key', and 'csr' are pyOpenSSL OBJECTS (as returned by
# sslCA(), sslCAKey(), and sslCSR() above), not file paths.
cert = OpenSSL.crypto.X509()
cert.set_subject(csr.get_subject())
cert.set_serial_number(1)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(24 * 60 * 60)
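# notAfter = now + 86400 seconds, i.e. the client cert is only valid for 24 hours after signing.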
cert.set_issuer(ca.get_subject())
cert.set_pubkey(csr.get_pubkey())
#cert.set_pubkey(ca.get_pubkey())
cert.sign(key, "sha512")
with open(conf['ipxe']['ssl_crt'], 'wb') as f:
f.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert))
return(cert)
def sslPKI(conf):
# run checks for everything, gen what's missing
ssldir = conf['ipxe']['ssldir']
os.makedirs(ssldir, exist_ok = True)
certfile = conf['ipxe']['ssl_crt']
key = sslCAKey(conf)
ca = sslCA(conf, key = key)
ckey = sslCKey(conf)
if os.path.isfile(certfile):
cert = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM,
open(certfile).read())
if not verifyCert(cert, ckey):
csr = sslCSR(conf, ckey)
cert = sslSign(conf, ca, key, csr)
else:
csr = sslCSR(conf, ckey)
cert = sslSign(conf, ca, key, csr)
return(cert)

bdisk/bchroot.py (deleted)

@@ -1,156 +0,0 @@
import os
import psutil
import subprocess
import datetime
import tarfile
import humanize
import shutil
def chroot(chrootdir, chroot_hostname, cmd = '/root/pre-build.sh'):
# MOUNT the chroot
mountpoints = psutil.disk_partitions(all = True)
mounts = []
for m in mountpoints:
mounts.append(m.mountpoint)
cmounts = {}
for m in ('chroot', 'resolv', 'proc', 'sys', 'efi', 'dev', 'pts', 'shm', 'run', 'tmp'):
cmounts[m] = None
# chroot (bind mount... onto itself. it's so stupid, i know. see https://bugs.archlinux.org/task/46169)
if chrootdir not in mounts:
cmounts['chroot'] = ['/bin/mount',
'--bind',
chrootdir,
chrootdir]
# resolv
if (chrootdir + '/etc/resolv.conf') not in mounts:
cmounts['resolv'] = ['/bin/mount',
'--bind',
'-o', 'ro',
'/etc/resolv.conf',
chrootdir + '/etc/resolv.conf']
# proc
if (chrootdir + '/proc') not in mounts:
cmounts['proc'] = ['/bin/mount',
'-t', 'proc',
'-o', 'nosuid,noexec,nodev',
'proc',
chrootdir + '/proc']
# sys
if (chrootdir + '/sys') not in mounts:
cmounts['sys'] = ['/bin/mount',
'-t', 'sysfs',
'-o', 'nosuid,noexec,nodev,ro',
'sys',
chrootdir + '/sys']
# efi (if it exists on the host)
if '/sys/firmware/efi/efivars' in mounts:
if (chrootdir + '/sys/firmware/efi/efivars') not in mounts:
cmounts['efi'] = ['/bin/mount',
'-t', 'efivarfs',
'-o', 'nosuid,noexec,nodev',
'efivarfs',
chrootdir + '/sys/firmware/efi/efivars']
# dev
if (chrootdir + '/dev') not in mounts:
cmounts['dev'] = ['/bin/mount',
'-t', 'devtmpfs',
'-o', 'mode=0755,nosuid',
'udev',
chrootdir + '/dev']
# pts
if (chrootdir + '/dev/pts') not in mounts:
cmounts['pts'] = ['/bin/mount',
'-t', 'devpts',
'-o', 'mode=0620,gid=5,nosuid,noexec',
'devpts',
chrootdir + '/dev/pts']
# shm (if it exists on the host)
if '/dev/shm' in mounts:
if (chrootdir + '/dev/shm') not in mounts:
cmounts['shm'] = ['/bin/mount',
'-t', 'tmpfs',
'-o', 'mode=1777,nosuid,nodev',
'shm',
chrootdir + '/dev/shm']
# run (if it exists on the host)
if '/run' in mounts:
if (chrootdir + '/run') not in mounts:
cmounts['run'] = ['/bin/mount',
'-t', 'tmpfs',
'-o', 'nosuid,nodev,mode=0755',
'run',
chrootdir + '/run']
# tmp (if it exists on the host)
if '/tmp' in mounts:
if (chrootdir + '/tmp') not in mounts:
cmounts['tmp'] = ['/bin/mount',
'-t', 'tmpfs',
'-o', 'mode=1777,strictatime,nodev,nosuid',
'tmp',
chrootdir + '/tmp']
# the order we mount here is VERY IMPORTANT. Sure, we could do "for m in cmounts:", but dicts aren't ordered until Python 3.6,
# and the ordering is SO important that it's best we be as explicit as possible while we're still in alpha/beta stage. TODO?
for m in ('chroot', 'resolv', 'proc', 'sys', 'efi', 'dev', 'pts', 'shm', 'run', 'tmp'):
if cmounts[m]:
subprocess.call(cmounts[m])
print("{0}: [CHROOT] Running '{1}' ({2}). PROGRESS: tail -f {2}/var/log/chroot_install.log ...".format(
datetime.datetime.now(),
cmd,
chrootdir))
real_root = os.open("/", os.O_RDONLY)
os.chroot(chrootdir)
os.system(cmd)
os.fchdir(real_root)
os.chroot('.')
os.close(real_root)
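# Standard chroot-escape pattern: we kept an fd to the host's real /, fchdir() back
# to it once the chrooted command finishes, then chroot('.') to restore the original root.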
if not os.path.isfile('{0}/sbin/init'.format(chrootdir)):
os.symlink('../lib/systemd/systemd', '{0}/sbin/init'.format(chrootdir))
return(chrootdir)
def chrootUnmount(chrootdir):
subprocess.call(['umount', '-lR', chrootdir])
def chrootTrim(build):
chrootdir = build['chrootdir']
arch = build['arch']
for a in arch:
# Compress the pacman and apacman caches.
for i in ('pacman', 'apacman'):
shutil.rmtree('{0}/root.{1}/var/cache/{2}'.format(chrootdir, a, i))
os.makedirs('{0}/root.{1}/usr/local/{2}'.format(chrootdir, a, i), exist_ok = True)
tarball = '{0}/root.{1}/usr/local/{2}/{2}.db.tar.xz'.format(chrootdir, a, i)
dbdir = '{0}/root.{1}/var/lib/{2}/local'.format(chrootdir, a, i)
if os.path.isdir(dbdir):
print("{0}: [CHROOT] Compressing {1}'s cache ({2})...".format(
datetime.datetime.now(),
chrootdir + '/root.' + a,
i))
if os.path.isfile(tarball):
os.remove(tarball)
with tarfile.open(name = tarball, mode = 'w:xz') as tar: # if this complains, use x:xz instead
tar.add(dbdir, arcname = os.path.basename(dbdir))
shutil.rmtree(dbdir, ignore_errors = True)
print("{0}: [CHROOT] Created {1} ({2}). {3} cleared.".format(
datetime.datetime.now(),
tarball,
humanize.naturalsize(
os.path.getsize(tarball)),
dbdir))
#for d in ('etc/pacman.d/gnupg', 'var/empty/.gnupg'): # actually, we should probably keep these.
# they don't take much space, and it's a PITA to pacman-key --init && pacman-key --populate again on boot.
# if os.path.isdir('{0}/root.{1}/{2}'.format(chrootdir, a, d)):
# shutil.rmtree('{0}/root.{1}/{2}'.format(chrootdir, a, d))
# TODO: move the self-cleanup in pre-build.sh to here.
delme = [#'/root/.gnupg', # see above
'/root/.bash_history',
#'/var/log/chroot_install.log', # disable for now. maybe always disable if debug is enabled? TODO.
'/.git',
'/root/.viminfo']
for i in delme:
fullpath = '{0}/root.{1}{2}'.format(chrootdir, a, i)
if os.path.isfile(fullpath):
os.remove(fullpath)
elif os.path.isdir(fullpath):
shutil.rmtree(fullpath, ignore_errors = True)

bdisk/bdisk.py (deleted)

@@ -1,71 +0,0 @@
#!/usr/bin/env python3
import argparse
import host
import prep
import bchroot
import build
import datetime
import bSSL
import ipxe
import bsync
import bGPG
import os
def bdisk(args):
# we also need to figure out how to implement "mentos" (old BDisk)-like functionality, letting us reuse an
# existing chroot install, if possible, to save time on future builds.
# If not, though, it's no big deal.
if os.getuid() != 0:
exit('{0}: ERROR: BDisk *must* be run as the root user or with sudo!'.format(datetime.datetime.now()))
print('{0}: Starting.'.format(datetime.datetime.now()))
conf = host.parseConfig(host.getConfig(conf_file = args['buildini']))[1]
prep.dirChk(conf)
conf['gpgobj'] = bGPG.genGPG(conf)
prep.buildChroot(conf, keep = False)
prep.prepChroot(conf)
arch = conf['build']['arch']
bGPG.killStaleAgent(conf)
for a in arch:
bchroot.chroot(conf['build']['chrootdir'] + '/root.' + a, 'bdisk.square-r00t.net')
bchroot.chrootUnmount(conf['build']['chrootdir'] + '/root.' + a)
prep.postChroot(conf)
bchroot.chrootTrim(conf['build'])
build.genImg(conf)
build.genUEFI(conf['build'], conf['bdisk'])
fulliso = build.genISO(conf)
bGPG.signIMG(fulliso['Main']['file'], conf)
build.displayStats(fulliso)
if conf['build']['ipxe']:
bSSL.sslPKI(conf)
ipxe.buildIPXE(conf)
iso = ipxe.genISO(conf)
if iso:
for x in iso.keys():
if x != 'name':
path = iso[x]['file']
bGPG.signIMG(path, conf)
build.displayStats(iso)
bsync.http(conf)
bsync.tftp(conf)
bsync.git(conf)
bsync.rsync(conf)
print('{0}: Finish.'.format(datetime.datetime.now()))
def parseArgs():
args = argparse.ArgumentParser(description = 'BDisk - a tool for building live/rescue media.',
epilog = 'brent s. || 2017 || https://bdisk.square-r00t.net')
args.add_argument('buildini',
metavar = '/path/to/build.ini',
default = '/etc/bdisk/build.ini',
nargs = '?',
help = 'The full/absolute path to the build.ini to use for this run. The default is /etc/bdisk/build.ini, but see https://bdisk.square-r00t.net/#the_code_build_ini_code_file.')
return(args)
def main():
args = vars(parseArgs().parse_args())
bdisk(args)
if __name__ == '__main__':
main()

bdisk/bsync.py (deleted)

@@ -1,187 +0,0 @@
import shutil
import os
import pwd
import grp
import datetime
from git import Repo # the git() function below shadows the module name, so import Repo directly
import subprocess
def http(conf):
http = conf['http']
build = conf['build']
prepdir = build['prepdir']
arch = build['arch']
bdisk = conf['bdisk']
if conf['sync']['http']:
uid = pwd.getpwnam(http['user'])[2]
gid = grp.getgrnam(http['group'])[2]
httpdir = http['path']
archboot = build['archboot']
# remove the destination if it exists
if os.path.isdir(httpdir):
print('{0}: [HTTP] Removing {1}...'.format(
datetime.datetime.now(),
httpdir))
shutil.rmtree(httpdir)
# ...just to make it again; we do this to avoid existing-file conflicts.
os.makedirs(httpdir)
# here we build a dict of files to copy and their destination paths.
httpfiles = {}
print('{0}: [HTTP] (Boot files) => {1}...'.format(
datetime.datetime.now(),
httpdir))
for a in arch:
for i in ('md5', 'sfs', 'sha256', 'sha512'):
httpfiles['{0}/{1}/airootfs.{2}'.format(bdisk['name'], a, i)] = '{0}/{1}/airootfs.{2}'.format(bdisk['name'], a, i)
httpfiles['VERSION_INFO.txt'] = 'VERSION_INFO.txt'
if 'x86_64' in arch:
httpfiles['boot/{0}.64.kern'.format(bdisk['uxname'])] = '{0}.64.kern'.format(bdisk['uxname'])
httpfiles['boot/{0}.64.img'.format(bdisk['uxname'])] = '{0}.64.img'.format(bdisk['uxname'])
if 'i686' in arch:
httpfiles['boot/{0}.32.kern'.format(bdisk['uxname'])] = '{0}.32.kern'.format(bdisk['uxname'])
httpfiles['boot/{0}.32.img'.format(bdisk['uxname'])] = '{0}.32.img'.format(bdisk['uxname'])
httpfiles['{0}.png'.format(bdisk['uxname'])] = '{0}.png'.format(bdisk['uxname'])
# and now the magic.
for k in httpfiles.keys():
destpath = httpfiles[k]
fulldest = '{0}/{1}'.format(httpdir, destpath)
parentdir = os.path.split(fulldest)[0]
os.makedirs(parentdir, exist_ok = True)
if os.path.lexists('{0}/{1}'.format(prepdir, k)):
shutil.copy2('{0}/{1}'.format(prepdir, k), '{0}/{1}'.format(httpdir, httpfiles[k]))
for root, dirs, files in os.walk(httpdir):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
for f in files:
os.chown(os.path.join(root, f), uid, gid)
def tftp(conf):
# TODO: pxelinux cfg
tftp = conf['tftp']
build = conf['build']
prepdir = build['prepdir']
arch = build['arch']
bdisk = conf['bdisk']
if conf['sync']['tftp']:
uid = pwd.getpwnam(tftp['user'])[2]
gid = grp.getgrnam(tftp['group'])[2]
tftpdir = tftp['path']
# remove the destination if it exists
if os.path.isdir(tftpdir):
print('{0}: [TFTP] Removing {1}...'.format(
datetime.datetime.now(),
tftpdir))
shutil.rmtree(tftpdir)
# and we make it again
os.makedirs(tftpdir)
# and make a dict of the files etc.
tftpfiles = {}
print('{0}: [TFTP] (Boot files) => {1}...'.format(
datetime.datetime.now(),
tftpdir))
for a in arch:
for i in ('md5', 'sfs', 'sha256', 'sha512'):
tftpfiles['{0}/{1}/airootfs.{2}'.format(bdisk['name'], a, i)] = '{0}/{1}/airootfs.{2}'.format(bdisk['name'], a, i)
tftpfiles['VERSION_INFO.txt'] = 'VERSION_INFO.txt'
if 'x86_64' in arch:
tftpfiles['boot/{0}.64.kern'.format(bdisk['uxname'])] = '{0}.kern'.format(bdisk['uxname'])
tftpfiles['boot/{0}.64.img'.format(bdisk['uxname'])] = '{0}.img'.format(bdisk['uxname'])
if 'i686' in arch:
tftpfiles['boot/{0}.32.kern'.format(bdisk['uxname'])] = '{0}.32.kern'.format(bdisk['uxname'])
tftpfiles['boot/{0}.32.img'.format(bdisk['uxname'])] = '{0}.32.img'.format(bdisk['uxname'])
tftpfiles['{0}.png'.format(bdisk['uxname'])] = '{0}.png'.format(bdisk['uxname'])
# and now the magic.
for k in tftpfiles.keys():
destpath = tftpfiles[k]
fulldest = '{0}/{1}'.format(tftpdir, destpath)
parentdir = os.path.split(fulldest)[0]
os.makedirs(parentdir, exist_ok = True)
shutil.copy2('{0}/{1}'.format(prepdir, k), '{0}/{1}'.format(tftpdir, tftpfiles[k]))
for root, dirs, files in os.walk(tftpdir):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
for f in files:
os.chown(os.path.join(root, f), uid, gid)
def git(conf):
build = conf['build']
git_name = conf['bdisk']['dev']
git_email = conf['bdisk']['email']
if conf['sync']['git']:
print('{0}: [GIT] Creating commit...'.format(datetime.datetime.now()))
repo = Repo(build['basedir'])
repo.git.add('--all')
repo.index.commit("automated commit from BDisk (git:sync)")
print('{0}: [GIT] Pushing to remote...'.format(datetime.datetime.now()))
repo.remotes.origin.push()
def rsync(conf):
# TODO: just copy the tftpboot pxelinux.cfg (to be generated) if tftp is enabled,
# and do nothing if http is; copying over three copies of the squashed filesystems
# is a waste of time, bandwidth, and disk space on the target.
build = conf['build']
prepdir = build['prepdir']
isodir = build['isodir']
arch = build['arch']
rsync = conf['rsync']
sync = conf['sync']
server = rsync['host']
path = rsync['path']
user = rsync['user']
locpath = False
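# Placeholder source path; cmd[4] (source), and sometimes cmd[5] (destination),
# are rewritten before each subprocess.call() below.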
if sync['rsync']:
# TODO: some sort of debugging/logging
cmd = ['/usr/bin/rsync',
'-a',
'-q',
'-z',
locpath,
'{0}@{1}:{2}/.'.format(user, server, path)]
#if sync['http']: # TODO: rsync:http to enable this
# cmd[4] = conf['http']['path']
# print('{0}: Syncing {1} to {2}. Please wait...'.format(
# datetime.datetime.now(),
# cmd[4],
# server))
# subprocess.call(cmd)
#if sync['tftp']:
# cmd[4] = conf['tftp']['path']
# print('{0}: Syncing {1} to {2}. Please wait...'.format(
# datetime.datetime.now(),
# cmd[4],
# server))
# subprocess.call(cmd)
if build['ipxe']:
cmd[4] = build['archboot']
print('{0}: [RSYNC] {1} => {2}...'.format(
datetime.datetime.now(),
cmd[4],
server))
subprocess.call(cmd)
cmd[4] = '{0}/boot'.format(build['prepdir'])
subprocess.call(cmd)
if conf['rsync']['iso']:
cmd[4] = isodir
print('{0}: [RSYNC] {1} => {2}...'.format(
datetime.datetime.now(),
cmd[4],
server))
subprocess.call(cmd)
# Now we copy some extra files.
prebuild_dir = '{0}/extra/pre-build.d'.format(build['basedir'])
rsync_files = ['{0}/VERSION_INFO.txt'.format(prepdir),
'{0}/root/packages.both'.format(prebuild_dir),
'{0}/root/iso.pkgs.both'.format(prebuild_dir)]
for x in rsync_files:
cmd[4] = x
subprocess.call(cmd)
# And we grab the remaining, since we need to rename them.
for a in arch:
cmd[4] = '{0}/{1}/root/packages.arch'.format(prebuild_dir, a)
cmd[5] = '{0}@{1}:{2}/packages.{3}'.format(user, server, path, a)
subprocess.call(cmd)
cmd[4] = '{0}/{1}/root/iso.pkgs.arch'.format(prebuild_dir, a)
cmd[5] = '{0}@{1}:{2}/iso.pkgs.{3}'.format(user, server, path, a)
subprocess.call(cmd)

bdisk/build.py (deleted)

@@ -1,416 +0,0 @@
import os
import shutil
import subprocess
import hashlib
import jinja2
import humanize
import datetime
import bGPG # bdisk.bGPG
from urllib.request import urlopen
def genImg(conf):
bdisk = conf['bdisk']
build = conf['build']
arch = build['arch']
chrootdir = build['chrootdir']
archboot = build['archboot']
basedir = build['basedir']
prepdir = build['prepdir']
hashes = {}
hashes['sha512'] = {}
hashes['sha256'] = {}
hashes['md5'] = {}
squashfses = []
for a in arch:
if a == 'i686':
bitness = '32'
elif a == 'x86_64':
bitness = '64'
# Create the squashfs image
airoot = archboot + '/' + a + '/'
squashimg = airoot + 'airootfs.sfs'
os.makedirs(airoot, exist_ok = True)
print("{0}: [BUILD] Squashing filesystem ({1})...".format(
datetime.datetime.now(),
chrootdir + '/root.' + a))
# TODO: use stdout and -progress if debugging is enabled. the below subprocess.call() just redirects to
# /dev/null.
DEVNULL = open(os.devnull, 'w')
cmd = ['/usr/bin/mksquashfs',
chrootdir + '/root.' + a,
squashimg,
'-no-progress',
'-noappend',
'-comp', 'xz']
subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
print("{0}: [BUILD] Generated {1} ({2}).".format(
datetime.datetime.now(),
squashimg,
humanize.naturalsize(
os.path.getsize(squashimg))))
# Generate the checksum files
print("{0}: [BUILD] Generating SHA512 SHA256, MD5 checksums ({1})...".format(
datetime.datetime.now(),
squashimg))
hashes['sha512'][a] = hashlib.sha512()
hashes['sha256'][a] = hashlib.sha256()
hashes['md5'][a] = hashlib.md5()
with open(squashimg, 'rb') as f:
while True:
stream = f.read(65536) # 64kb chunks
if not stream:
break
# NOTE: these items are hashlib objects, NOT strings!
hashes['sha512'][a].update(stream)
hashes['sha256'][a].update(stream)
hashes['md5'][a].update(stream)
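# A single 64KB-chunked read pass feeds all three digests, so the squashfs image is only read from disk once.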
with open(airoot + 'airootfs.sha512', 'w+') as f:
f.write("{0} airootfs.sfs\n".format(hashes['sha512'][a].hexdigest()))
with open(airoot + 'airootfs.sha256', 'w+') as f:
f.write("{0} airootfs.sfs\n".format(hashes['sha256'][a].hexdigest()))
with open(airoot + 'airootfs.md5', 'w+') as f:
f.write("{0} airootfs.sfs\n".format(hashes['md5'][a].hexdigest()))
squashfses.append('{0}'.format(squashimg))
print("{0}: [BUILD] Hash checksums complete.".format(datetime.datetime.now()))
# Logo
os.makedirs(prepdir + '/boot', exist_ok = True)
if not os.path.isfile('{0}/extra/{1}.png'.format(basedir, bdisk['uxname'])):
shutil.copy2(basedir + '/extra/bdisk.png', '{0}/{1}.png'.format(prepdir, bdisk['uxname']))
else:
shutil.copy2(basedir + '/extra/{0}.png'.format(bdisk['uxname']), '{0}/{1}.png'.format(prepdir, bdisk['uxname']))
# Kernels, initrds...
# We use a dict here so we can use the right filenames...
# I might change how I handle this in the future.
bootfiles = {}
#bootfiles['kernel'] = ['vmlinuz-linux-' + bdisk['name'], '{0}.{1}.kern'.format(bdisk['uxname'], bitness)]
bootfiles['kernel'] = ['vmlinuz-linux', '{0}.{1}.kern'.format(bdisk['uxname'], bitness)]
#bootfiles['initrd'] = ['initramfs-linux-{0}.img'.format(bdisk['name']), '{0}.{1}.img'.format(bdisk['uxname'], bitness)]
bootfiles['initrd'] = ['initramfs-linux.img', '{0}.{1}.img'.format(bdisk['uxname'], bitness)]
for x in ('kernel', 'initrd'):
shutil.copy2('{0}/root.{1}/boot/{2}'.format(chrootdir, a, bootfiles[x][0]), '{0}/boot/{1}'.format(prepdir, bootfiles[x][1]))
for i in squashfses:
bGPG.signIMG(i, conf)
def genUEFI(build, bdisk):
arch = build['arch']
# 32-bit EFI implementations are nigh nonexistent.
# We don't really need to worry about them.
# Plus there's always multiarch.
# I can probably do this better with a dict... TODO.
if 'x86_64' in arch:
prepdir = build['prepdir']
basedir = build['basedir']
chrootdir = build['chrootdir']
mountpt = build['mountpt']
templates_dir = build['basedir'] + '/extra/templates'
efidir = '{0}/EFI/{1}'.format(prepdir, bdisk['name'])
os.makedirs(efidir, exist_ok = True)
efiboot_img = efidir + '/efiboot.img'
os.makedirs(prepdir + '/EFI/boot', exist_ok = True)
## Download the EFI shells if we don't have them.
# For UEFI 2.3+ (http://sourceforge.net/apps/mediawiki/tianocore/index.php?title=UEFI_Shell)
if not os.path.isfile(prepdir + '/EFI/shellx64_v2.efi'):
shell2_path = prepdir + '/EFI/shellx64_v2.efi'
print("{0}: [BUILD] Warning: You are missing {1}. Fetching...".format(datetime.datetime.now(), shell2_path))
shell2_url = 'https://raw.githubusercontent.com/tianocore/edk2/master/ShellBinPkg/UefiShell/X64/Shell.efi'
shell2_fetch = urlopen(shell2_url)
with open(shell2_path, 'wb+') as dl:
dl.write(shell2_fetch.read())
shell2_fetch.close()
# Shell for older versions (http://sourceforge.net/apps/mediawiki/tianocore/index.php?title=Efi-shell)
# TODO: is there an Arch package for this? can we just install that in the chroot and copy the shell binaries?
if not os.path.isfile(prepdir + '/EFI/shellx64_v1.efi'):
shell1_path = prepdir + '/EFI/shellx64_v1.efi'
print("{0}: [BUILD] Warning: You are missing {1}. Fetching...".format(datetime.datetime.now(), shell1_path))
shell1_url = 'https://raw.githubusercontent.com/tianocore/edk2/master/EdkShellBinPkg/FullShell/X64/Shell_Full.efi'
shell1_fetch = urlopen(shell1_url)
with open(shell1_path, 'wb+') as dl:
dl.write(shell1_fetch.read())
shell1_fetch.close()
print("{0}: [BUILD] Building UEFI support...".format(datetime.datetime.now()))
## But wait! That's not all! We need more binaries.
# Looks like these are in the "efitools" package now.
for f in ('PreLoader.efi', 'HashTool.efi'):
if f == 'PreLoader.efi':
fname = 'bootx64.efi'
else:
fname = f
with open('{0}/root.x86_64/usr/share/efitools/efi/{1}'.format(
chrootdir,
f),
'rb') as r:
with open('{0}/EFI/boot/{1}'.format(prepdir, fname), 'wb') as file:
file.write(r.read())
# And we also need the systemd efi bootloader.
if os.path.isfile(prepdir + '/EFI/boot/loader.efi'):
os.remove(prepdir + '/EFI/boot/loader.efi')
with open('{0}/root.x86_64/usr/lib/systemd/boot/efi/systemd-bootx64.efi'.format(
chrootdir),
'rb') as r:
with open('{0}/EFI/boot/loader.efi'.format(prepdir), 'wb') as file:
file.write(r.read())
# And the accompanying configs for the systemd efi bootloader, too.
tpl_loader = jinja2.FileSystemLoader(templates_dir)
env = jinja2.Environment(loader = tpl_loader)
os.makedirs(prepdir + '/loader/entries', exist_ok = True)
for t in ('loader', 'ram', 'base', 'uefi2', 'uefi1'):
if t == 'base':
fname = bdisk['uxname'] + '.conf'
elif t not in ('uefi1', 'uefi2'):
fname = t + '.conf'
else:
fname = bdisk['uxname'] + '_' + t + '.conf'
if t == 'loader':
tplpath = prepdir + '/loader/'
fname = 'loader.conf' # we change the var from above because it's an oddball.
else:
tplpath = prepdir + '/loader/entries/'
tpl = env.get_template('EFI/' + t + '.conf.j2')
tpl_out = tpl.render(build = build, bdisk = bdisk)
with open(tplpath + fname, "w+") as f:
f.write(tpl_out)
# And we need to get filesizes (in bytes) for everything we need to include in the ESP.
# This is more important than it looks.
sizetotal = 33553920 # The spec'd EFI binary size (32MB). It's okay to go over this though (and we do)
# because xorriso sees it as a filesystem image and adjusts the ISO automagically.
#sizetotal = 2097152 # we start with 2MB and add to it for wiggle room
sizefiles = ['/boot/' + bdisk['uxname'] + '.64.img',
'/boot/' + bdisk['uxname'] + '.64.kern',
'/EFI/boot/bootx64.efi',
'/EFI/boot/loader.efi',
'/EFI/boot/HashTool.efi',
'/EFI/shellx64_v1.efi',
'/EFI/shellx64_v2.efi']
for i in sizefiles:
sizetotal += os.path.getsize(prepdir + i)
# Loader configs
for (path, dirs, files) in os.walk(prepdir + '/loader/'):
for file in files:
fname = os.path.join(path, file)
sizetotal += os.path.getsize(fname)
# And now we create the EFI binary filesystem image/binary...
print("{0}: [BUILD] Creating EFI ESP image {2} ({1})...".format(
datetime.datetime.now(),
humanize.naturalsize(sizetotal),
efiboot_img))
if os.path.isfile(efiboot_img):
os.remove(efiboot_img)
with open(efiboot_img, 'wb+') as f:
f.truncate(sizetotal)
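# truncate() on the freshly-created file yields a sparse file of exactly sizetotal bytes,
# which mkfs.fat formats below.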
DEVNULL = open(os.devnull, 'w')
cmd = ['/sbin/mkfs.fat', '-F', '32', '-n', bdisk['name'] + '_EFI', efiboot_img]
subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
cmd = ['/bin/mount', efiboot_img, build['mountpt']]
subprocess.call(cmd)
os.makedirs('{0}/EFI/{1}'.format(build['mountpt'], bdisk['name']))
os.makedirs(build['mountpt'] + '/EFI/boot')
os.makedirs(build['mountpt'] + '/loader/entries')
# Ready for some deja vu? This is because it uses an embedded version as well for hybrid ISO.
# I think.
# TODO: just move this to a function instead, with "efi" as a param and change
# the templates to use "if efi == 'yes'" instead.
# function should set the "installation" path for the conf as well based on the value of efi
# parameter.
env = jinja2.Environment(loader = tpl_loader)
for t in ('loader', 'ram', 'base', 'uefi2', 'uefi1'):
if t == 'base':
fname = bdisk['uxname'] + '.conf'
elif t not in ('uefi1', 'uefi2'):
fname = t + '.conf'
else:
fname = bdisk['uxname'] + '_' + t + '.conf'
if t == 'loader':
tplpath = build['mountpt'] + '/loader/'
fname = 'loader.conf' # we change the var from above because it's an oddball.
else:
tplpath = build['mountpt'] + '/loader/entries/'
tpl = env.get_template('EFI/' + t + '.conf.j2')
tpl_out = tpl.render(build = build, bdisk = bdisk, efi = 'yes')
with open(tplpath + fname, "w+") as f:
f.write(tpl_out)
for x in ('bootx64.efi', 'HashTool.efi', 'loader.efi'):
y = prepdir + '/EFI/boot/' + x
z = mountpt + '/EFI/boot/' + x
if os.path.isfile(z):
os.remove(z)
shutil.copy(y, z)
for x in ('shellx64_v1.efi', 'shellx64_v2.efi'):
y = prepdir + '/EFI/' + x
z = mountpt + '/EFI/' + x
if os.path.isfile(z):
os.remove(z)
shutil.copy(y, z)
shutil.copy2('{0}/root.{1}/boot/vmlinuz-linux'.format(chrootdir, 'x86_64'),
'{0}/EFI/{1}/{2}.efi'.format(mountpt, bdisk['name'], bdisk['uxname']))
shutil.copy2('{0}/root.{1}/boot/initramfs-linux.img'.format(chrootdir, 'x86_64'),
'{0}/EFI/{1}/{2}.img'.format(mountpt, bdisk['name'], bdisk['uxname']))
# TODO: support both arch's as EFI bootable instead? Maybe? requires more research. very rare.
#shutil.copy2('{0}/root.{1}/boot/vmlinuz-linux-{2}'.format(chrootdir, a, bdisk['name']),
# '{0}/EFI/{1}/{2}.{3}.efi'.format(mountpt, bdisk['name'], bdisk['uxname'], bitness))
#shutil.copy2('{0}/root.{1}/boot/initramfs-linux-{2}.img'.format(chrootdir, a, bdisk['uxname']),
# '{0}/EFI/{1}/{2}.{3}.img'.format(mountpt, bdisk['name'], bdisk['uxname'], bitness))
cmd = ['/bin/umount', mountpt]
subprocess.call(cmd)
efisize = humanize.naturalsize(os.path.getsize(efiboot_img))
print('{0}: [BUILD] Built EFI binary.'.format(datetime.datetime.now()))
return(efiboot_img)
def genISO(conf):
build = conf['build']
bdisk = conf['bdisk']
archboot = build['archboot']
prepdir = build['prepdir']
templates_dir = build['basedir'] + '/extra/templates'
arch = build['arch']
builddir = prepdir + '/' + bdisk['name']
extradir = build['basedir'] + '/extra/'
# arch[0] is safe to use, even if multiarch, because the only case where it'd be ambiguous
# is when x86_64 is specifically set to [0]. See host.py's parseConfig().
# TODO: can we use syslinux for EFI too instead of prebootloader?
syslinuxdir = build['chrootdir'] + '/root.' + arch[0] + '/usr/lib/syslinux/bios/'
sysl_tmp = prepdir + '/isolinux/'
ver = bdisk['ver']
if len(arch) == 1:
isofile = '{0}-{1}-{2}-{3}.iso'.format(bdisk['uxname'], bdisk['ver'], build['buildnum'], arch[0])
else:
isofile = '{0}-{1}-{2}.iso'.format(bdisk['uxname'], bdisk['ver'], build['buildnum'])
isopath = build['isodir'] + '/' + isofile
# In case we're building a single-arch ISO...
if len(arch) == 1:
isolinux_cfg = '/BIOS/isolinux.cfg.arch.j2'
if arch[0] == 'i686':
bitness = '32'
efi = False
elif arch[0] == 'x86_64':
bitness = '64'
efi = True
else:
isolinux_cfg = '/BIOS/isolinux.cfg.multi.j2'
bitness = False
efi = True
if os.path.isfile(isopath):
os.remove(isopath)
if archboot != prepdir + '/' + bdisk['name']: # best to use static concat here...
if os.path.isdir(builddir):
shutil.rmtree(builddir, ignore_errors = True)
shutil.copytree(archboot, builddir)
if build['ipxe']:
ipxe = conf['ipxe']
if ipxe['iso']:
minifile = '{0}-{1}-mini.iso'.format(bdisk['uxname'], bdisk['ver'])
minipath = build['isodir'] + '/' + minifile
if ipxe['usb']:
usbfile = '{0}-{1}-mini.usb.img'.format(bdisk['uxname'], bdisk['ver'])
minipath = build['isodir'] + '/' + usbfile
# Copy isolinux files
print("{0}: [BUILD] Staging ISO preparation...".format(datetime.datetime.now()))
isolinux_files = ['isolinux.bin',
'vesamenu.c32',
'linux.c32',
'reboot.c32']
# TODO: implement debugging mode in bdisk
#if debug:
# isolinux_files[0] = 'isolinux-debug.bin'
os.makedirs(sysl_tmp, exist_ok = True)
for f in isolinux_files:
if os.path.isfile(sysl_tmp + f):
os.remove(sysl_tmp + f)
shutil.copy2(syslinuxdir + f, sysl_tmp + f)
ifisolinux_files = ['ldlinux.c32',
'libcom32.c32',
'libutil.c32',
'ifcpu64.c32']
for f in ifisolinux_files:
if os.path.isfile(sysl_tmp + f):
os.remove(sysl_tmp + f)
shutil.copy2(syslinuxdir + f, sysl_tmp + f)
tpl_loader = jinja2.FileSystemLoader(templates_dir)
env = jinja2.Environment(loader = tpl_loader)
tpl = env.get_template(isolinux_cfg)
tpl_out = tpl.render(build = build, bdisk = bdisk, bitness = bitness)
with open(sysl_tmp + '/isolinux.cfg', "w+") as f:
f.write(tpl_out)
# And we need to build the ISO!
# TODO: only include UEFI support if we actually built it!
print("{0}: [BUILD] Building full ISO ({1})...".format(datetime.datetime.now(), isopath))
if efi:
cmd = ['/usr/bin/xorriso',
'-as', 'mkisofs',
'-iso-level', '3',
'-full-iso9660-filenames',
'-volid', bdisk['name'],
'-appid', bdisk['desc'],
'-publisher', bdisk['dev'],
'-preparer', 'prepared by ' + bdisk['dev'],
'-eltorito-boot', 'isolinux/isolinux.bin',
'-eltorito-catalog', 'isolinux/boot.cat',
'-no-emul-boot',
'-boot-load-size', '4',
'-boot-info-table',
'-isohybrid-mbr', syslinuxdir + 'isohdpfx.bin',
'-eltorito-alt-boot',
'-e', 'EFI/' + bdisk['name'] + '/efiboot.img',
'-no-emul-boot',
'-isohybrid-gpt-basdat',
'-output', isopath,
prepdir]
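# Two El Torito boot entries here: BIOS (isolinux.bin) and, via -eltorito-alt-boot,
# EFI (the efiboot.img ESP). The -isohybrid-mbr/-isohybrid-gpt-basdat options make the
# same image dd-able to a USB stick on both firmware types ("hybrid ISO").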
else:
# UNTESTED. TODO.
# I think I want to also get rid of: -boot-load-size 4,
# -boot-info-table, ... possibly -isohybrid-gpt-basdat...
# https://wiki.archlinux.org/index.php/Unified_Extensible_Firmware_Interface#Remove_UEFI_boot_support_from_Optical_Media
cmd = ['/usr/bin/xorriso',
'-as', 'mkisofs',
'-iso-level', '3',
'-full-iso9660-filenames',
'-volid', bdisk['name'],
'-appid', bdisk['desc'],
'-publisher', bdisk['dev'],
'-preparer', 'prepared by ' + bdisk['dev'],
'-eltorito-boot', 'isolinux/isolinux.bin',
'-eltorito-catalog', 'isolinux/boot.cat',
'-no-emul-boot',
'-boot-load-size', '4',
'-boot-info-table',
'-isohybrid-mbr', syslinuxdir + 'isohdpfx.bin',
'-no-emul-boot',
'-isohybrid-gpt-basdat',
'-output', isopath,
prepdir]
DEVNULL = open(os.devnull, 'w')
subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
# Get size of ISO
iso = {}
iso['name'] = ['Main']
iso['Main'] = {}
iso['Main']['sha'] = hashlib.sha256()
with open(isopath, 'rb') as f:
while True:
stream = f.read(65536) # 64kb chunks
if not stream:
break
iso['Main']['sha'].update(stream)
iso['Main']['sha'] = iso['Main']['sha'].hexdigest()
iso['Main']['file'] = isopath
iso['Main']['size'] = humanize.naturalsize(os.path.getsize(isopath))
iso['Main']['type'] = 'Full'
iso['Main']['fmt'] = 'Hybrid ISO'
return(iso)
def displayStats(iso):
for i in iso['name']:
print("{0}: == {1} {2} ==".format(datetime.datetime.now(), iso[i]['type'], iso[i]['fmt']))
print('\t\t\t = Size: {0}'.format(iso[i]['size']))
print('\t\t\t = SHA256: {0}'.format(iso[i]['sha']))
print('\t\t\t = Location: {0}'.format(iso[i]['file']))
def cleanUp():
# TODO: clear out all of prepdir?
pass

bdisk/chroot.py (new, empty)
bdisk/confparse.py (new, empty)
bdisk/env_prep.py (new, empty)
bdisk/git.py (new, empty)

bdisk/host.py (deleted)

@@ -1,194 +0,0 @@
import os
import platform
import re
import glob
import configparser
import validators
import git
import datetime
from socket import getaddrinfo
def getOS():
# Returns one of: SuSE, debian, fedora, redhat, centos, mandrake,
# mandriva, rocks, slackware, yellowdog, gentoo, UnitedLinux,
# turbolinux, arch, mageia
distro = list(platform.linux_distribution())[0].lower()
return(distro)
def getBits():
bits = list(platform.architecture())[0]
return(bits)
def getHostname():
hostname = platform.node()
return(hostname)
def getConfig(conf_file = '/etc/bdisk/build.ini'):
conf = False
# define some default conf paths in case we're installed by
# a package manager, in order of the paths we should search.
currentdir = os.path.dirname(os.path.realpath(__file__))
currentdir_user = os.path.abspath('{0}/../build.ini'.format(currentdir))
currentdir_def = os.path.abspath('{0}/../extra/dist.build.ini'.format(currentdir))
default_conf_paths = ['/etc/bdisk/build.ini',
'/usr/share/bdisk/build.ini',
'/usr/share/bdisk/extra/build.ini',
'/usr/share/docs/bdisk/build.ini', # this is the preferred installation path for packagers
'/usr/local/etc/bdisk/build.ini',
'/usr/local/share/docs/bdisk/build.ini',
'/opt/dev/bdisk/build.ini',
'/opt/dev/bdisk/extra/build.ini',
'/opt/dev/bdisk/extra/dist.build.ini',
currentdir_user]
# if we weren't given one/using the default...
if conf_file == '/etc/bdisk/build.ini':
if not os.path.isfile(conf_file):
for p in default_conf_paths:
if os.path.isfile(p):
conf = p
break
else:
conf = conf_file
else:
conf = conf_file
defconf = os.path.abspath('{0}/../extra/dist.build.ini'.format(os.path.dirname(os.path.realpath(__file__))))
if not conf:
# okay, so let's check for distributed/"blank" ini's
# since we can't seem to find one.
dist_conf_paths = [re.sub(r'(build\.ini)', r'dist.\1', s) for s in default_conf_paths]
for q in dist_conf_paths:
if os.path.isfile(q):
conf = q
break
if os.path.isfile(default_conf_paths[4]):
defconf = default_conf_paths[4]
confs = [defconf, conf]
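# Order matters: configparser merges these in sequence, so the user's config overrides the dist defaults.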
return(confs)
def parseConfig(confs):
config = configparser.ConfigParser(interpolation = configparser.ExtendedInterpolation())
config.read(confs)
# a dict makes this so much easier.
config_dict = {s:dict(config.items(s)) for s in config.sections()}
# Convert the booleans to pythonic booleans in the dict...
config_dict['bdisk']['user'] = config['bdisk'].getboolean('user')
config_dict['build']['i_am_a_racecar'] = config['build'].getboolean('i_am_a_racecar')
config_dict['build']['ipxe'] = config['build'].getboolean('ipxe')
config_dict['build']['sign'] = config['build'].getboolean('sign')
config_dict['build']['multiarch'] = (config_dict['build']['multiarch']).lower()
config_dict['ipxe']['iso'] = config['ipxe'].getboolean('iso')
config_dict['ipxe']['usb'] = config['ipxe'].getboolean('usb')
config_dict['sync']['git'] = config['sync'].getboolean('git')
config_dict['sync']['http'] = config['sync'].getboolean('http')
config_dict['sync']['rsync'] = config['sync'].getboolean('rsync')
config_dict['sync']['tftp'] = config['sync'].getboolean('tftp')
config_dict['rsync']['iso'] = config['rsync'].getboolean('iso')
# Get the version...
# Two possibilities.
# e.g. 1 commit after tag with 7-digit object hex: ['v3.10', '1', 'gb4a5e40']
# Or if were sitting on a tag with no commits: ['v3.10']
# So we want our REAL version to be the following:
# Tagged release: v#.##
# X number of commits after release: v#.##rX
# Both have the (local) build number appended to the deliverables,
# which is reset for an empty isodir OR a new tagged release (incl.
# commits on top of a new tagged release). e.g. for build Y:
# v#.##-Y or v#.##rX-Y
if config_dict['bdisk']['ver'] == '':
try:
repo = git.Repo(config_dict['build']['basedir'])
refs = repo.git.describe(repo.head.commit).split('-')
if len(refs) >= 2:
config_dict['bdisk']['ver'] = refs[0] + 'r' + refs[1]
else:
config_dict['bdisk']['ver'] = refs[0]
except (git.exc.InvalidGitRepositoryError, git.exc.GitCommandError):
exit(('{0}: ERROR: {1} is NOT a valid git repository, and you did not specify bdisk:ver in your build.ini! ' +
'Did you perhaps install from a package manager? Please refer to the documentation.').format(datetime.datetime.now(),
config_dict['build']['basedir']))
# And the build number.
# TODO: support tracking builds per version. i.e. in buildnum:
# v2.51r13:0
# v2.51r17:3
if os.path.isfile(config_dict['build']['dlpath'] + '/buildnum'):
with open(config_dict['build']['dlpath'] + '/buildnum', 'r') as f:
config_dict['build']['buildnum'] = int(f.readlines()[0])
else:
config_dict['build']['buildnum'] = 0
# But logically we should start the build over at 0 if we don't have any existing ISO's.
if os.path.isdir(config_dict['build']['isodir']):
if os.listdir(config_dict['build']['isodir']) == []:
config_dict['build']['buildnum'] = 0
# ...or if we don't have any previous builds for this ISO version.
elif not glob.glob('{0}/*v{1}r*.iso'.format(config_dict['build']['isodir'], config_dict['bdisk']['ver'])):
config_dict['build']['buildnum'] = 0
# and build a list of arch(es) we want to build
if config_dict['build']['multiarch'] in ('','yes','true','1','no','false','0'):
config_dict['build']['arch'] = ['x86_64','i686']
elif config_dict['build']['multiarch'] in ('x86_64','64','no32'):
config_dict['build']['arch'] = ['x86_64']
elif config_dict['build']['multiarch'] in ('i686','32','no64'):
config_dict['build']['arch'] = ['i686']
else:
exit(('{0}: ERROR: {1} is not a valid value. Check your configuration.').format(
datetime.datetime.now(),
config_dict['build']['multiarch']))
## VALIDATORS ##
# Validate bootstrap mirror
config_dict['src'] = {}
for a in config_dict['build']['arch']:
config_dict['src'][a] = config_dict['source_' + a]
if (validators.domain(config_dict['src'][a]['mirror']) or validators.ipv4(
config_dict['src'][a]['mirror']) or validators.ipv6(
config_dict['src'][a]['mirror'])):
try:
getaddrinfo(config_dict['src'][a]['mirror'], None)
except OSError:
exit(('{0}: ERROR: {1} does not resolve and cannot be used as a ' +
'mirror for the bootstrap tarballs. Check your configuration.').format(
datetime.datetime.now(),
config_dict['src'][a]['mirror']))
config_dict['src'][a]['gpg'] = config['source_' + a].getboolean('gpg')
# Are we rsyncing? If so, validate the rsync host.
# Works for IP address too. It does NOT check to see if we can
# actually *rsync* to it; that'll come later.
if config_dict['sync']['rsync']:
if (validators.domain(config_dict['rsync']['host']) or validators.ipv4(
config_dict['rsync']['host']) or validators.ipv6(
config_dict['rsync']['host'])):
try:
getaddrinfo(config_dict['rsync']['host'], None)
except OSError:
exit(('{0}: ERROR: {1} does not resolve and cannot be used for rsyncing. ' +
'Check your configuration.').format(
datetime.datetime.now(),
config_dict['rsync']['host']))
else:
exit(('{0}: ERROR: {1} is not a valid host and cannot be used for rsyncing. ' +
'Check your configuration.').format(
datetime.datetime.now(),
config_dict['rsync']['host']))
# Validate the URI.
if config_dict['build']['ipxe']:
# so this won't validate e.g. custom LAN domains (https://pxeserver/bdisk.php). TODO.
if not validators.url(config_dict['ipxe']['uri']):
if not re.match('^https?://localhost(/.*)?$', config_dict['ipxe']['uri']):
exit('{0}: ERROR: {1} is not a valid URL/URI. Check your configuration.'.format(
datetime.datetime.now(),
config_dict['ipxe']['uri']))
# Validate required paths
if not os.path.exists(config_dict['build']['basedir'] + '/extra'):
exit(("{0}: ERROR: {1} does not contain BDisk's core files!" +
"Check your configuration.").format(
datetime.datetime.now(),
config_dict['build']['basedir']))
# Make dirs if they don't exist
for d in ('archboot', 'isodir', 'mountpt', 'srcdir', 'prepdir'):
os.makedirs(config_dict['build'][d], exist_ok = True)
# Make dirs for sync staging if we need to
for x in ('http', 'tftp'):
if config_dict['sync'][x]:
os.makedirs(config_dict[x]['path'], exist_ok = True)
return(config, config_dict)

bdisk/iPXE.py (new, empty)

bdisk/ipxe.py (deleted)

@@ -1,304 +0,0 @@
import os
import shutil
import re
import subprocess
import jinja2
import git
import patch
import datetime
import humanize
import hashlib
def buildIPXE(conf):
build = conf['build']
bdisk = conf['bdisk']
ipxe = conf['ipxe']
mini = ipxe['iso']
prepdir = conf['build']['prepdir']
templates_dir = build['basedir'] + '/extra/templates'
ipxe_tpl = templates_dir + '/iPXE'
srcdir = build['srcdir']
embedscript = build['dlpath'] + '/EMBED'
ipxe_src = srcdir + '/ipxe'
#ipxe_git_uri = 'git://git.ipxe.org/ipxe.git'
ipxe_git_uri = 'http://git.ipxe.org/ipxe.git'
print('{0}: [IPXE] Prep/fetch sources...'.format(
datetime.datetime.now()))
# Get the source
if os.path.isdir(ipxe_src):
shutil.rmtree(ipxe_src)
ipxe_repo = git.Repo.clone_from(ipxe_git_uri, ipxe_src)
# Generate patches
tpl_loader = jinja2.FileSystemLoader(ipxe_tpl)
env = jinja2.Environment(loader = tpl_loader)
tpl = env.get_template('EMBED.j2')
tpl_out = tpl.render(ipxe = ipxe)
with open(embedscript, 'w+') as f:
f.write(tpl_out)
# Feature enabling
# In config/general.h
with open('{0}/src/config/general.h'.format(ipxe_src), 'r') as f:
generalconf = f.read()
# And in config/console.h
with open('{0}/src/config/console.h'.format(ipxe_src), 'r') as f:
consoleconf = f.read()
patterns = ((r'^#undef(\s*NET_PROTO_IPV6.*)$', r'#define\g<1>'), # enable IPv6
(r'^#undef(\s*DOWNLOAD_PROTO_HTTPS)', r'#define\g<1>'), # enable HTTPS
(r'^//(#define\s*IMAGE_TRUST_CMD)', r'\g<1>'), # moar HTTPS
(r'^#undef(\s*DOWNLOAD_PROTO_FTP)', r'#define\g<1>')) # enable FTP
#(r'^//(#define\s*CONSOLE_CMD)', r'\g<1>'), # BROKEN in EFI? TODO. if enabled, move it inside the tuple above.
#(r'^//(#define\s*IMAGE_PNG)', r'\g<1>'), # SAME, broken in EFI? TODO.
#console = (r'^//(#define\s*CONSOLE_VESAFB)', r'\g<1>') # BROKEN in EFI? TODO.
# https://stackoverflow.com/a/4427835
# https://emilics.com/notebook/enblog/p869.html
# The above methods don't seem to work. it craps out on the pattern matchings
# so we use tuples instead.
for x in patterns:
generalconf = re.sub(x[0], x[1], generalconf, flags=re.MULTILINE)
with open('{0}/src/config/general.h'.format(ipxe_src), 'w') as f:
f.write(generalconf)
# Uncomment when we want to test the above consdict etc.
#for x in patterns:
# generalconf = re.sub(x[0], x[1], generalconf, flags=re.MULTILINE)
#with open('{0}/src/config/console.h'.format(ipxe_src), 'w') as f:
# f.write(console)
# Now we make!
cwd = os.getcwd()
os.chdir(ipxe_src + '/src')
modenv = os.environ.copy()
modenv['EMBED'] = embedscript
#modenv['TRUST'] = ipxe_ssl_ca # TODO: test these
#modenv['CERT'] = '{0},{1}'.format(ipxe_ssl_ca, ipxe_ssl_crt) # TODO: test these
#modenv['PRIVKEY'] = ipxe_ssl_ckey # TODO: test these
build_cmd = {}
build_cmd['base'] = ['/usr/bin/make',
'all',
'EMBED={0}'.format(embedscript)]
# TODO: copy the UNDI stuff/chainloader to tftpboot, if enabled
build_cmd['undi'] = ['/usr/bin/make',
'bin/ipxe.pxe',
'EMBED={0}'.format(embedscript)]
build_cmd['efi'] = ['/usr/bin/make',
'bin-i386-efi/ipxe.efi',
'bin-x86_64-efi/ipxe.efi',
'EMBED={0}'.format(embedscript)]
# Now we call the commands.
DEVNULL = open(os.devnull, 'w')
if os.path.isfile(build['dlpath'] + '/ipxe.log'):
os.remove(build['dlpath'] + '/ipxe.log')
print(('{0}: [IPXE] Building iPXE ({1}). PROGRESS: tail -f {2}/ipxe.log ...').format(
datetime.datetime.now(),
ipxe_src,
build['dlpath']))
with open('{0}/ipxe.log'.format(build['dlpath']), 'a') as f:
subprocess.call(build_cmd['base'], stdout = f, stderr = subprocess.STDOUT, env=modenv)
subprocess.call(build_cmd['undi'], stdout = f, stderr = subprocess.STDOUT, env=modenv)
subprocess.call(build_cmd['efi'], stdout = f, stderr = subprocess.STDOUT, env=modenv)
print('{0}: [IPXE] Built iPXE image(s) successfully.'.format(datetime.datetime.now()))
os.chdir(cwd)
def genISO(conf):
build = conf['build']
bdisk = conf['bdisk']
ipxe = conf['ipxe']
arch = build['arch']
dlpath = build['dlpath']
ver = bdisk['ver']
isodir = build['isodir']
isofile = '{0}-{1}-{2}.mini.iso'.format(bdisk['uxname'], bdisk['ver'], build['buildnum'])
isopath = '{0}/{1}'.format(isodir, isofile)
prepdir = build['prepdir']
chrootdir = build['chrootdir']
mini = ipxe['iso']
iso = {}
srcdir = build['srcdir']
ipxe_src = srcdir + '/ipxe'
mountpt = build['mountpt']
templates_dir = build['basedir'] + '/extra/templates/iPXE/'
tpl_loader = jinja2.FileSystemLoader(templates_dir)
env = jinja2.Environment(loader = tpl_loader)
bootdir = '{0}/ipxe_mini'.format(dlpath)
efiboot_img = '{0}/EFI/{1}/efiboot.img'.format(bootdir, bdisk['name'])
innerefi64 = '{0}/src/bin-x86_64-efi/ipxe.efi'.format(ipxe_src)
efi = False
# this shouldn't be necessary... if it is, we can revisit this in the future. see "Inner dir" below.
#innerefi32 = '{0}/src/bin-i386-efi/ipxe.efi'.format(ipxe_src)
# We only need to do EFI prep if we have UEFI/x86_64 support. See above, but IA64 is dead, Zed.
if mini and ('x86_64' in arch):
efi = True
# EFI prep/building
print('{0}: [IPXE] UEFI support for Mini ISO...'.format(datetime.datetime.now()))
if os.path.isdir(bootdir):
shutil.rmtree(bootdir)
os.makedirs(os.path.dirname(efiboot_img), exist_ok = True) # FAT32 embedded EFI dir
os.makedirs('{0}/EFI/boot'.format(bootdir), exist_ok = True) # EFI bootloader binary dir
# Inner dir (miniboot.img file)
#sizetotal = 2097152 # 2MB wiggle room. increase this if we add IA64.
sizetotal = 34603008 # 33MB wiggle room. increase this if we add IA64.
sizetotal += os.path.getsize(innerefi64)
sizefiles = ['HashTool', 'PreLoader']
for f in sizefiles:
sizetotal += os.path.getsize('{0}/root.x86_64/usr/share/efitools/efi/{1}.efi'.format(
chrootdir,
f))
# These won't be *quite* accurate since it's before the template substitution,
# but it'll be close enough.
for (path, dirs, files) in os.walk(templates_dir):
for file in files:
fname = os.path.join(path, file)
sizetotal += os.path.getsize(fname)
print("{0}: [IPXE] Creating EFI ESP image {1} ({2})...".format(
datetime.datetime.now(),
efiboot_img,
humanize.naturalsize(sizetotal)))
if os.path.isfile(efiboot_img):
os.remove(efiboot_img)
with open(efiboot_img, 'wb+') as f:
f.truncate(sizetotal)
DEVNULL = open(os.devnull, 'w')
cmd = ['/sbin/mkfs.fat', '-F', '32', '-n', 'iPXE_EFI', efiboot_img]
subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
cmd = ['/bin/mount', efiboot_img, mountpt]
subprocess.call(cmd)
os.makedirs(mountpt + '/EFI/boot', exist_ok = True) # "Inner" (EFI image)
#os.makedirs('{0}/EFI/{1}'.format(mountpt, bdisk['name']), exist_ok = True) # "Inner" (EFI image)
os.makedirs('{0}/boot'.format(bootdir), exist_ok = True) # kernel(s)
os.makedirs('{0}/loader/entries'.format(bootdir), exist_ok = True) # EFI
for d in (mountpt, bootdir):
shutil.copy2(innerefi64,'{0}/EFI/boot/ipxe.efi'.format(d))
        for f in ('PreLoader.efi', 'HashTool.efi'):
            # PreLoader is installed under the default EFI bootloader filename.
            fname = 'bootx64.efi' if f == 'PreLoader.efi' else f
            src = '{0}/root.x86_64/usr/share/efitools/efi/{1}'.format(chrootdir, f)
            # The same binary lands in both the ESP image and the ISO9660 tree.
            for d in (mountpt, bootdir):
                with open(src, 'rb') as r:
                    with open('{0}/EFI/boot/{1}'.format(d, fname), 'wb+') as file:
                        file.write(r.read())
# And the systemd efi bootloader.
with open('{0}/root.x86_64/usr/lib/systemd/boot/efi/systemd-bootx64.efi'.format(
chrootdir),
'rb') as r:
with open('{0}/EFI/boot/loader.efi'.format(mountpt), 'wb+') as f:
f.write(r.read())
# And loader entries.
        os.makedirs('{0}/loader/entries'.format(mountpt), exist_ok = True)
for t in ('loader', 'base'):
if t == 'base':
name = bdisk['uxname']
tplpath = '{0}/loader/entries'.format(mountpt)
else:
name = t
tplpath = '{0}/loader'.format(mountpt)
tpl = env.get_template('EFI/{0}.conf.j2'.format(t))
tpl_out = tpl.render(build = build, bdisk = bdisk)
with open('{0}/{1}.conf'.format(tplpath, name), "w+") as f:
f.write(tpl_out)
cmd = ['/bin/umount', mountpt]
subprocess.call(cmd)
# Outer dir
outerdir = True
os.makedirs('{0}/isolinux'.format(bootdir), exist_ok = True) # BIOS
# Loader entries (outer)
for t in ('loader','base'):
if t == 'base':
name = bdisk['uxname']
tplpath = '{0}/loader/entries'.format(bootdir)
else:
name = t
tplpath = '{0}/loader'.format(bootdir)
tpl = env.get_template('EFI/{0}.conf.j2'.format(t))
tpl_out = tpl.render(build = build, bdisk = bdisk, outerdir = outerdir)
with open('{0}/{1}.conf'.format(tplpath, name), "w+") as f:
f.write(tpl_out)
if mini:
# BIOS prepping
shutil.copy2('{0}/src/bin/ipxe.lkrn'.format(ipxe_src), '{0}/boot/ipxe.krn'.format(bootdir))
isolinux_filelst = ['isolinux.bin',
'ldlinux.c32']
os.makedirs('{0}/isolinux'.format(bootdir), exist_ok = True)
for f in isolinux_filelst:
shutil.copy2('{0}/root.{1}/usr/lib/syslinux/bios/{2}'.format(chrootdir, arch[0], f), '{0}/isolinux/{1}'.format(bootdir, f))
tpl = env.get_template('BIOS/isolinux.cfg.j2')
tpl_out = tpl.render(build = build, bdisk = bdisk)
with open('{0}/isolinux/isolinux.cfg'.format(bootdir), "w+") as f:
f.write(tpl_out)
print("{0}: [IPXE] Building Mini ISO ({1})...".format(datetime.datetime.now(), isopath))
if efi:
cmd = ['/usr/bin/xorriso',
'-as', 'mkisofs',
'-iso-level', '3',
'-full-iso9660-filenames',
'-volid', bdisk['name'] + '_MINI',
'-appid', bdisk['desc'],
'-publisher', bdisk['dev'],
'-preparer', 'prepared by ' + bdisk['dev'],
'-eltorito-boot', 'isolinux/isolinux.bin',
'-eltorito-catalog', 'isolinux/boot.cat',
'-no-emul-boot',
'-boot-load-size', '4',
'-boot-info-table',
'-isohybrid-mbr', '{0}/root.{1}/usr/lib/syslinux/bios/isohdpfx.bin'.format(chrootdir, arch[0]),
'-eltorito-alt-boot',
'-e', 'EFI/{0}/{1}'.format(bdisk['name'], os.path.basename(efiboot_img)),
'-no-emul-boot',
'-isohybrid-gpt-basdat',
'-output', isopath,
bootdir]
else:
            # UNTESTED. TODO.
            # I think I want to also get rid of: -boot-load-size 4,
            # -boot-info-table, ... possibly -isohybrid-gpt-basdat...
            # https://wiki.archlinux.org/index.php/Unified_Extensible_Firmware_Interface#Remove_UEFI_boot_support_from_Optical_Media
cmd = ['/usr/bin/xorriso',
'-as', 'mkisofs',
'-iso-level', '3',
'-full-iso9660-filenames',
'-volid', bdisk['name'] + '_MINI',
'-appid', bdisk['desc'],
'-publisher', bdisk['dev'],
'-preparer', 'prepared by ' + bdisk['dev'],
'-eltorito-boot', 'isolinux/isolinux.bin',
'-eltorito-catalog', 'isolinux/boot.cat',
'-no-emul-boot',
'-boot-load-size', '4',
'-boot-info-table',
'-isohybrid-mbr', '{0}/root.{1}/usr/lib/syslinux/bios/isohdpfx.bin'.format(chrootdir, arch[0]),
'-no-emul-boot',
'-isohybrid-gpt-basdat',
'-output', isopath,
bootdir]
DEVNULL = open(os.devnull, 'w')
subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
# Get size of ISO
iso['name'] = ['Mini']
iso['Mini'] = {}
iso['Mini']['sha'] = hashlib.sha256()
with open(isopath, 'rb') as f:
while True:
stream = f.read(65536) # 64kb chunks
if not stream:
break
iso['Mini']['sha'].update(stream)
iso['Mini']['sha'] = iso['Mini']['sha'].hexdigest()
iso['Mini']['file'] = isopath
iso['Mini']['size'] = humanize.naturalsize(os.path.getsize(isopath))
iso['Mini']['type'] = 'Mini'
iso['Mini']['fmt'] = 'Hybrid ISO'
return(iso)
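# A minimal sketch (not part of the original code) of the chunked-hashing
# pattern genISO() inlines above, pulled into a helper for clarity. The name
# hashFile and its algo parameter are hypothetical, for illustration only.
def hashFile(path, algo = 'sha256'):
    # Read in 64kb chunks so multi-hundred-MB ISOs aren't slurped into memory.
    hasher = getattr(hashlib, algo)()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            hasher.update(chunk)
    return(hasher.hexdigest())
# Usage, equivalent to the inline loop above: iso['Mini']['sha'] = hashFile(isopath)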
def tftpbootEnv(conf):
build = conf['build']
ipxe = conf['ipxe']
sync = conf['sync']
if sync['tftp']:
        pass # TODO: generate a pxelinux.cfg in bdisk/tftp.py (yet to be written) and sync the iPXE chainloader in here; a hedged sketch follows below.
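# A hedged sketch of what the TODO above might become; everything below is an
# assumption for illustration (the BIOS/pxelinux.cfg.j2 template and the tftp
# root layout are hypothetical until bdisk/tftp.py is written).
def tftpbootSketch(conf):
    build = conf['build']
    tftpdir = conf['tftp']['path']  # created by dirChk() when sync['tftp'] is set
    # Stage the BIOS chainloader next to a generated pxelinux config.
    os.makedirs('{0}/pxelinux.cfg'.format(tftpdir), exist_ok = True)
    shutil.copy2('{0}/ipxe/src/bin/ipxe.lkrn'.format(build['srcdir']),
                 '{0}/ipxe.krn'.format(tftpdir))
    loader = jinja2.FileSystemLoader('{0}/extra/templates'.format(build['basedir']))
    env = jinja2.Environment(loader = loader)
    tpl = env.get_template('BIOS/pxelinux.cfg.j2')  # hypothetical template
    with open('{0}/pxelinux.cfg/default'.format(tftpdir), 'w+') as f:
        f.write(tpl.render(build = build, ipxe = conf['ipxe']))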

0
bdisk/main.py Normal file
View File

View File

@@ -1,375 +0,0 @@
import os
import shutil
import re
import hashlib
import tarfile
import subprocess
import jinja2
import datetime
import humanize
from urllib.request import urlopen
import host # bdisk.host
import bGPG # bdisk.bGPG
def dirChk(conf):
# Make dirs if they don't exist
for d in ('archboot', 'isodir', 'mountpt', 'srcdir', 'prepdir'):
os.makedirs(conf['build'][d], exist_ok = True)
# Make dirs for sync staging if we need to
for x in ('http', 'tftp'):
if conf['sync'][x]:
os.makedirs(conf[x]['path'], exist_ok = True)
def downloadTarball(conf):
build = conf['build']
dlpath = build['dlpath']
src = conf['src']
arch = build['arch']
tarball_path = {}
for a in arch:
locsrc = conf['source_' + a]
mirror = locsrc['mirrorproto'] + '://' + locsrc['mirror']
rlsdir = mirror + locsrc['mirrorpath']
if locsrc['mirrorchksum'] != '':
if locsrc['chksumtype'] == '':
exit("{0}: source_{1}:chksumtype is unset!".format(datetime.datetime.now(), a))
hash_type = locsrc['chksumtype']
hash_in = urlopen(mirror + locsrc['mirrorchksum'])
hashsums = hash_in.read()
hash_in.close()
hash_raw = hashsums.decode("utf-8")
hash_list = list(filter(None, hash_raw.split('\n')))
hash_dict = {x.split()[1]: x.split()[0] for x in hash_list}
        # We use .gnupg since we'll need it later.
        os.makedirs(dlpath + '/.gnupg', exist_ok = True)
        # Returns path/filename, e.g. /some/path/to/file.tar.gz
        tarball_path[a] = dlpath + '/.latest.' + a + '.tar'
        pattern = re.compile(r'^.*' + a + r'\.tar(\.(gz|bz2|xz))?$')
if locsrc['mirrorfile'] != '':
tarball = locsrc['mirrorfile']
else:
            # Pick the first checksum entry whose filename matches the arch's tarball pattern.
            tarball = next(f for f in hash_dict.keys() if pattern.search(f))
if locsrc['mirrorchksum'] != '':
hashsum = hash_dict[tarball]
        if not os.path.isfile(tarball_path[a]):
            # fetch the tarball...
print("{0}: [PREP] Fetching tarball ({1} architecture)...".format(
datetime.datetime.now(),
a))
tarball_dl = urlopen(rlsdir + tarball)
with open(tarball_path[a], 'wb') as f:
f.write(tarball_dl.read())
tarball_dl.close()
print("{0}: [PREP] Done fetching {1} ({2}).".format(
datetime.datetime.now(),
tarball_path[a],
humanize.naturalsize(
os.path.getsize(tarball_path[a]))))
if locsrc['mirrorchksum'] != '':
print("{0}: [PREP] Checking hash checksum {1} against {2}...".format(
datetime.datetime.now(),
hashsum,
tarball_path[a]))
            # Calculate the checksum according to the type specified.
            tarball_hash = False
            if hash_type in hashlib.algorithms_available:
                hashfunc = getattr(hashlib, hash_type)
                with open(tarball_path[a], 'rb') as tb:
                    tarball_hash = hashfunc(tb.read()).hexdigest()
if not tarball_hash:
exit("{0}: source_{1}:chksumtype '{2}' is not supported on this machine!".format(
datetime.datetime.now(),
a,
hash_type))
if tarball_hash != hashsum:
exit(("{0}: {1} either did not download correctly\n\t\t\t or a wrong (probably old) version exists on the filesystem.\n\t\t\t " +
"Please delete it and try again.").format(datetime.datetime.now(), tarball))
if locsrc['mirrorgpgsig'] != '':
# let's verify the signature.
if locsrc['mirrorgpgsig'] == '.sig':
gpgsig_remote = rlsdir + tarball + '.sig'
else:
gpgsig_remote = locsrc['mirrorgpgsig']
sig_dl = urlopen(gpgsig_remote)
sig = tarball_path[a] + '.sig'
with open(sig, 'wb+') as f:
f.write(sig_dl.read())
sig_dl.close()
gpg_verify = bGPG.gpgVerify(sig, tarball_path[a], conf)
if not gpg_verify:
exit("{0}: There was a failure checking {1} against {2}. Please investigate.".format(
datetime.datetime.now(),
sig,
tarball_path[a]))
return(tarball_path)
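# A small worked example (not in the original module) of the checksum-file
# parsing technique downloadTarball() inlines above: each "<hash>  <filename>"
# line becomes a {filename: hash} entry. parseChksumFile is a hypothetical name.
def parseChksumFile(raw):
    lines = list(filter(None, raw.split('\n')))
    return({x.split()[1]: x.split()[0] for x in lines})
# e.g. parseChksumFile('abc123  foo.tar.gz\n')  ->  {'foo.tar.gz': 'abc123'}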
def unpackTarball(tarball_path, build, keep = False):
chrootdir = build['chrootdir']
    if os.path.isdir(chrootdir):
        if not keep:
            # Wipe the stale chroot and recreate it empty.
            shutil.rmtree(chrootdir, ignore_errors = True)
            os.makedirs(chrootdir, exist_ok = True)
    else:
        os.makedirs(chrootdir, exist_ok = True)
# Open and extract the tarball
if not keep:
for a in build['arch']:
print("{0}: [PREP] Extracting tarball {1} ({2})...".format(
datetime.datetime.now(),
tarball_path[a],
humanize.naturalsize(
os.path.getsize(tarball_path[a]))))
            # 'r:*' auto-detects gz/bz2/xz, matching what downloadTarball() accepts.
            tar = tarfile.open(tarball_path[a], 'r:*')
tar.extractall(path = chrootdir)
tar.close()
print("{0}: [PREP] Extraction for {1} finished.".format(datetime.datetime.now(), tarball_path[a]))
def buildChroot(conf, keep = False):
build = conf['build']
bdisk = conf['bdisk']
user = conf['user']
dlpath = build['dlpath']
chrootdir = build['chrootdir']
arch = build['arch']
extradir = build['basedir'] + '/extra'
    # Fetch and unpack the bootstrap tarball(s) first (unpackTarball() returns nothing).
    unpackTarball(downloadTarball(conf), build, keep)
# build dict of lists of files and dirs from pre-build.d dir, do the same with arch-specific changes.
prebuild_overlay = {}
prebuild_arch_overlay = {}
for x in arch:
prebuild_arch_overlay[x] = {}
for y in ['files', 'dirs']:
prebuild_overlay[y] = []
prebuild_arch_overlay[x][y] = []
for path, dirs, files in os.walk('{0}/pre-build.d/'.format(extradir)):
prebuild_overlay['dirs'].append('{0}/'.format(path))
for file in files:
prebuild_overlay['files'].append(os.path.join(path, file))
for x in prebuild_overlay.keys():
prebuild_overlay[x][:] = [re.sub('^{0}/pre-build.d/'.format(extradir), '', s) for s in prebuild_overlay[x]]
prebuild_overlay[x] = list(filter(None, prebuild_overlay[x]))
for y in prebuild_arch_overlay.keys():
prebuild_arch_overlay[y][x][:] = [i for i in prebuild_overlay[x] if i.startswith(y)]
prebuild_arch_overlay[y][x][:] = [re.sub('^{0}/'.format(y), '', s) for s in prebuild_arch_overlay[y][x]]
prebuild_arch_overlay[y][x] = list(filter(None, prebuild_arch_overlay[y][x]))
prebuild_overlay[x][:] = [y for y in prebuild_overlay[x] if not y.startswith(('x86_64','i686'))]
prebuild_overlay['dirs'].remove('/')
# create the dir structure. these should almost definitely be owned by root.
for a in arch:
for dir in prebuild_overlay['dirs']:
os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
# and copy over the files. again, chown to root.
for file in prebuild_overlay['files']:
shutil.copy2('{0}/pre-build.d/{1}'.format(extradir, file),
'{0}/root.{1}/{2}'.format(chrootdir, a, file), follow_symlinks = False)
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
# do the same for arch-specific stuff.
for dir in prebuild_arch_overlay[a]['dirs']:
os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
for file in prebuild_arch_overlay[a]['files']:
shutil.copy2('{0}/pre-build.d/{1}/{2}'.format(extradir, a, file),
'{0}/root.{1}/{2}'.format(chrootdir, a, file), follow_symlinks = False)
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
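# A tiny sketch (illustration only; buildChroot() inlines this) of the overlay
# split performed above: paths that start with an arch name are routed to that
# arch's overlay with the prefix stripped, and everything else stays common.
# splitOverlay and its arches parameter are hypothetical names.
def splitOverlay(paths, arches = ('x86_64', 'i686')):
    common = [p for p in paths if not p.startswith(arches)]
    per_arch = {a: [re.sub('^{0}/'.format(a), '', p)
                    for p in paths if p.startswith(a)]
                for a in arches}
    return(common, per_arch)
# e.g. splitOverlay(['etc/motd', 'x86_64/etc/foo.conf'])
#      -> (['etc/motd'], {'x86_64': ['etc/foo.conf'], 'i686': []})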
def prepChroot(conf):
build = conf['build']
bdisk = conf['bdisk']
user = conf['user']
chrootdir = build['chrootdir']
prepdir = build['prepdir']
arch = build['arch']
bdisk_repo_dir = build['basedir']
dlpath = build['dlpath']
templates_dir = bdisk_repo_dir + '/extra/templates'
    #build = {} # why was this here?
    ## Let's prep some variables to write out the VERSION_INFO.txt,
    # and these should be passed in from the args, for the most part.
build['name'] = bdisk['name']
build['time'] = datetime.datetime.utcnow().strftime("%a %b %d %H:%M:%S UTC %Y")
    hostname = host.getHostname()
build['user'] = os.environ['USER']
if 'SUDO_USER' in os.environ:
build['realuser'] = os.environ['SUDO_USER']
build['buildnum'] += 1
with open(dlpath + '/buildnum', 'w+') as f:
f.write(str(build['buildnum']) + "\n")
# and now that we have that dict, let's write out the VERSION_INFO.txt file.
loader = jinja2.FileSystemLoader(templates_dir)
env = jinja2.Environment(loader = loader)
tpl = env.get_template('VERSION_INFO.txt.j2')
    tpl_out = tpl.render(build = build, bdisk = bdisk, hostname = hostname, distro = host.getOS())
for a in arch:
# Copy the GPG pubkey
shutil.copy2('{0}/gpgkey.pub'.format(dlpath), '{0}/root.{1}/root/pubkey.gpg'.format(chrootdir, a))
# Write the VERSION_INFO.txt from template
with open('{0}/root.{1}/root/VERSION_INFO.txt'.format(chrootdir, a), 'w+') as f:
f.write(tpl_out)
with open('{0}/VERSION_INFO.txt'.format(prepdir), 'w+') as f:
f.write(tpl_out)
# And perform the templating overlays
templates_overlay = {}
templates_arch_overlay = {}
for x in arch:
templates_arch_overlay[x] = {}
for y in ['files', 'dirs']:
templates_overlay[y] = []
templates_arch_overlay[x][y] = []
for path, dirs, files in os.walk('{0}/pre-build.d'.format(templates_dir)):
for dir in dirs:
templates_overlay['dirs'].append('{0}/'.format(dir))
for file in files:
templates_overlay['files'].append(os.path.join(path, file))
for x in templates_overlay.keys():
        templates_overlay[x][:] = [re.sub(r'^{0}/pre-build.d/(.*)(\.j2)'.format(templates_dir), r'\g<1>', s) for s in templates_overlay[x]]
templates_overlay[x] = list(filter(None, templates_overlay[x]))
for y in templates_arch_overlay.keys():
templates_arch_overlay[y][x][:] = [i for i in templates_overlay[x] if i.startswith(y)]
            templates_arch_overlay[y][x][:] = [re.sub(r'^{0}/(.*)(\.j2)'.format(y), r'\g<1>', s) for s in templates_arch_overlay[y][x]]
templates_arch_overlay[y][x][:] = [re.sub('^{0}/'.format(y), '', s) for s in templates_arch_overlay[y][x]]
templates_arch_overlay[y][x] = list(filter(None, templates_arch_overlay[y][x]))
templates_overlay[x][:] = [y for y in templates_overlay[x] if not y.startswith(('x86_64','i686'))]
if '/' in templates_overlay['dirs']:
templates_overlay['dirs'].remove('/')
# create the dir structure. these should almost definitely be owned by root.
    signkey = None  # stays None if GPG signing is disabled
    if build['gpg']:
        gpg = conf['gpgobj']
        if conf['gpg']['mygpgkey']:
            signkey = conf['gpg']['mygpgkey']
        else:
            signkey = str(gpg.signers[0].subkeys[0].fpr)
for a in arch:
for dir in templates_overlay['dirs']:
os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
# and write the files. again, chown to root.
for file in templates_overlay['files']:
tplname = 'pre-build.d/{0}.j2'.format(file)
tpl = env.get_template(tplname)
tpl_out = tpl.render(build = build, bdisk = bdisk, mygpgkey = signkey, user = user)
with open('{0}/root.{1}/{2}'.format(chrootdir, a, file), 'w') as f:
f.write(tpl_out)
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
# do the same for arch-specific stuff.
for dir in templates_arch_overlay[a]['dirs']:
os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
for file in templates_arch_overlay[a]['files']:
tplname = 'pre-build.d/{0}/{1}.j2'.format(a, file)
tpl = env.get_template('{0}'.format(tplname))
tpl_out = tpl.render(build = build, bdisk = bdisk, mygpgkey = signkey)
with open('{0}/root.{1}/{2}'.format(chrootdir, a, file), 'w') as f:
f.write(tpl_out)
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
return(build)
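# A minimal sketch (assumption; not in the original module) of the jinja2
# render step prepChroot() applies to every pre-build.d/*.j2 file, shown on an
# inline template string instead of the FileSystemLoader used above.
def renderDemo():
    tpl = jinja2.Environment().from_string('{{ bdisk["name"] }} build #{{ build["buildnum"] }}')
    return(tpl.render(build = {'buildnum': 3}, bdisk = {'name': 'BDisk'}))
# renderDemo()  ->  'BDisk build #3'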
def postChroot(conf):
build = conf['build']
bdisk = conf['bdisk']
dlpath = build['dlpath']
chrootdir = build['chrootdir']
arch = build['arch']
overdir = build['basedir'] + '/overlay/'
templates_dir = '{0}/extra/templates'.format(build['basedir'])
loader = jinja2.FileSystemLoader(templates_dir)
env = jinja2.Environment(loader = loader)
postbuild_overlay = {}
postbuild_arch_overlay = {}
for x in arch:
        if os.path.isfile('{0}/root.{1}/README'.format(chrootdir, x)):
            os.remove('{0}/root.{1}/README'.format(chrootdir, x))
postbuild_arch_overlay[x] = {}
for y in ['files', 'dirs']:
postbuild_overlay[y] = []
postbuild_arch_overlay[x][y] = []
for path, dirs, files in os.walk(overdir):
postbuild_overlay['dirs'].append('{0}/'.format(path))
for file in files:
postbuild_overlay['files'].append(os.path.join(path, file))
for x in postbuild_overlay.keys():
postbuild_overlay[x][:] = [re.sub('^' + overdir, '', s) for s in postbuild_overlay[x]]
postbuild_overlay[x] = list(filter(None, postbuild_overlay[x]))
for y in postbuild_arch_overlay.keys():
postbuild_arch_overlay[y][x][:] = [i for i in postbuild_overlay[x] if i.startswith(y)]
postbuild_arch_overlay[y][x][:] = [re.sub('^' + y + '/', '', s) for s in postbuild_arch_overlay[y][x]]
postbuild_arch_overlay[y][x] = list(filter(None, postbuild_arch_overlay[y][x]))
postbuild_overlay[x][:] = [y for y in postbuild_overlay[x] if not y.startswith(('x86_64','i686'))]
postbuild_overlay['dirs'].remove('/')
# create the dir structure. these should almost definitely be owned by root.
for a in arch:
for dir in postbuild_overlay['dirs']:
os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0, follow_symlinks = False)
# and copy over the files. again, chown to root.
for file in postbuild_overlay['files']:
shutil.copy2(overdir + file, '{0}/root.{1}/{2}'.format(chrootdir, a, file), follow_symlinks = False)
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
# do the same for arch-specific stuff.
for dir in postbuild_arch_overlay[a]['dirs']:
os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0, follow_symlinks = False)
for file in postbuild_arch_overlay[a]['files']:
shutil.copy2('{0}{1}/{2}'.format(overdir, a, file),
'{0}/root.{1}/{2}'.format(chrootdir, a, file),
follow_symlinks = False)
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
# And perform the templating overlays
templates_overlay = {}
templates_arch_overlay = {}
for x in arch:
templates_arch_overlay[x] = {}
for y in ['files', 'dirs']:
templates_overlay[y] = []
templates_arch_overlay[x][y] = []
for path, dirs, files in os.walk('{0}/overlay'.format(templates_dir)):
for dir in dirs:
templates_overlay['dirs'].append('{0}/'.format(dir))
for file in files:
templates_overlay['files'].append(os.path.join(path, file))
for x in templates_overlay.keys():
        templates_overlay[x][:] = [re.sub(r'^{0}/overlay/(.*)(\.j2)'.format(templates_dir), r'\g<1>', s) for s in templates_overlay[x]]
templates_overlay[x] = list(filter(None, templates_overlay[x]))
for y in templates_arch_overlay.keys():
templates_arch_overlay[y][x][:] = [i for i in templates_overlay[x] if i.startswith(y)]
            templates_arch_overlay[y][x][:] = [re.sub(r'^{0}/(.*)(\.j2)'.format(y), r'\g<1>', s) for s in templates_arch_overlay[y][x]]
templates_arch_overlay[y][x][:] = [re.sub('^{0}/'.format(y), '', s) for s in templates_arch_overlay[y][x]]
templates_arch_overlay[y][x] = list(filter(None, templates_arch_overlay[y][x]))
templates_overlay[x][:] = [y for y in templates_overlay[x] if not y.startswith(('x86_64','i686'))]
if '/' in templates_overlay['dirs']:
templates_overlay['dirs'].remove('/')
# create the dir structure. these should almost definitely be owned by root.
    signkey = None  # stays None if GPG signing is disabled
    if build['gpg']:
        gpg = conf['gpgobj']
        if conf['gpg']['mygpgkey']:
            signkey = conf['gpg']['mygpgkey']
        else:
            signkey = str(gpg.signers[0].subkeys[0].fpr)
for a in arch:
for dir in templates_overlay['dirs']:
os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
# and write the files. again, chown to root.
for file in templates_overlay['files']:
tplname = 'overlay/{0}.j2'.format(file)
tpl = env.get_template(tplname)
tpl_out = tpl.render(build = build, bdisk = bdisk, mygpgkey = signkey)
with open('{0}/root.{1}/{2}'.format(chrootdir, a, file), 'w') as f:
f.write(tpl_out)
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
# do the same for arch-specific stuff.
for dir in templates_arch_overlay[a]['dirs']:
os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
for file in templates_arch_overlay[a]['files']:
tplname = 'overlay/{0}/{1}.j2'.format(a, file)
tpl = env.get_template(tplname)
tpl_out = tpl.render(build = build, bdisk = bdisk, mygpgkey = signkey)
with open('{0}/root.{1}/{2}'.format(chrootdir, a, file), 'w') as f:
f.write(tpl_out)
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)

0
bdisk/sync.py Normal file
View File

1
bdisk/version.py Normal file
View File

@@ -0,0 +1 @@
BDISK_VERSION = '4.0.0a1'