think i might have something working. ipxe untested still.
bdisk/bGPG.py
@@ -1,4 +1,5 @@
 import os
+from io import BytesIO
 import subprocess
 import datetime
 import jinja2
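
The new BytesIO import is load-bearing for the signing rework below: pygpgme's Context.sign() writes a detached signature into a file-like object rather than returning bytes. A minimal sketch of that pattern (assuming a usable GNUPGHOME with a default secret key, and an existing payload file):

    from io import BytesIO
    import gpgme

    ctx = gpgme.Context()
    with open('payload.bin', 'rb') as data_in:
        sigbuf = BytesIO()                                # in-memory sink for the signature
        ctx.sign(data_in, sigbuf, gpgme.SIG_MODE_DETACH)  # signs with the default secret key
    sigbuf.seek(0)                                        # rewind before reading the signature back
    signature = sigbuf.read()
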
@@ -8,8 +9,10 @@ import psutil
 def genGPG(conf):
     # https://media.readthedocs.org/pdf/pygpgme/latest/pygpgme.pdf
     build = conf['build']
+    bdisk = conf['bdisk']
     gpghome = conf['gpg']['mygpghome']
     distkey = build['gpgkey']
+    gpgkeyserver = build['gpgkeyserver']
     templates_dir = '{0}/extra/templates'.format(build['basedir'])
     mykey = False
     pkeys = []
@@ -26,14 +29,16 @@ def genGPG(conf):
     os.environ['GNUPGHOME'] = gpghome
     gpg = gpgme.Context()
     # do we need to add a keyserver?
-    if build['gpgkeyserver'] != '':
+    if gpgkeyserver != '':
         dirmgr = '{0}/dirmngr.conf'.format(gpghome)
         if os.path.isfile(dirmgr):
             with open(dirmgr, 'r+') as f:
-                findme = any(gpgmirror in line for line in f)
+                findme = any(gpgkeyserver in line for line in f)
                 if not findme:
                     f.seek(0, os.SEEK_END)
-                    f.write("\n# Added by {0}.\nkeyserver {1}\n")
+                    f.write("\n# Added by {0}.\nkeyserver {1}\n".format(
+                        bdisk['pname'],
+                        gpgkeyserver))
     if mykey:
         try:
             privkey = gpg.get_key(mykey, True)
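
The dirmngr.conf handling above is a plain append-if-missing idiom; isolated, it looks like this (a sketch — the path and keyserver value are illustrative, and the file is assumed to exist, which the os.path.isfile() guard above ensures):

    import os

    def ensure_line(path, line):
        with open(path, 'r+') as f:                # read/write: scan first, append after
            if not any(line in l for l in f):      # the scan leaves the cursor at EOF...
                f.seek(0, os.SEEK_END)             # ...but be explicit before writing
                f.write('\n{0}\n'.format(line))

    ensure_line('/tmp/gnupg/dirmngr.conf', 'keyserver hkp://pool.sks-keyservers.net')
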
@@ -58,22 +63,28 @@ def genGPG(conf):
     if build['gpgkeyserver'] != '':
         dirmgr = '{0}/dirmngr.conf'.format(gpghome)
         with open(dirmgr, 'r+') as f:
-            findme = any(gpgmirror in line for line in f)
+            findme = any(gpgkeyserver in line for line in f)
             if not findme:
                 f.seek(0, os.SEEK_END)
-                f.write("\n# Added by {0}.\nkeyserver {1}\n"
+                f.write("\n# Added by {0}.\nkeyserver {1}\n".format(
+                    bdisk['pname'],
+                    build['gpgkeyserver']))
     gpg.signers = pkeys
     # Now we try to find and add the key for the base image.
-    gpg.keylist_mode = 2 # remote (keyserver)
-    try:
-        key = gpg.get_key(distkey)
-    except:
-        exit('{0}: ERROR: We cannot find key ID {1}!'.format(
-            datetime.datetime.now(),
-            distkey))
-    importkey = key.subkeys[0].fpr
-    gpg.keylist_mode = 1 # local keyring (default)
-    DEVNULL = open(os.devnull, 'w')
+    gpg.keylist_mode = gpgme.KEYLIST_MODE_EXTERN # remote (keyserver)
+    if distkey: # testing
+        #try:
+        key = gpg.get_key(distkey)
+        #except:
+        #    exit('{0}: ERROR: We cannot find key ID {1}!'.format(
+        #        datetime.datetime.now(),
+        #        distkey))
+        importkey = key.subkeys[0].fpr
+        gpg.keylist_mode = gpgme.KEYLIST_MODE_LOCAL # local keyring (default)
+        DEVNULL = open(os.devnull, 'w')
+        print('{0}: [GPG] Importing {1} and signing it for verification purposes...'.format(
+            datetime.datetime.now(),
+            distkey))
     cmd = ['/usr/bin/gpg',
            '--recv-keys',
            '--batch',
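
For readers new to pygpgme: the named KEYLIST_MODE_* constants replace the bare 2/1 magic numbers, and flipping the mode is what turns get_key() into a keyserver lookup. The round-trip this hunk moves toward, in isolation (a sketch; the key ID is a hypothetical placeholder and dirmngr must already know a keyserver):

    import gpgme

    ctx = gpgme.Context()
    ctx.keylist_mode = gpgme.KEYLIST_MODE_EXTERN   # search the keyserver, not the local ring
    key = ctx.get_key('7F2D434B9741E8AC')          # hypothetical distro key ID
    fpr = key.subkeys[0].fpr                       # full fingerprint of the primary subkey
    ctx.keylist_mode = gpgme.KEYLIST_MODE_LOCAL    # back to the local keyring (the default)
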
@@ -110,49 +121,97 @@ def killStaleAgent(conf):
 
 def signIMG(path, conf):
     if conf['build']['gpg']:
-        # If we enabled GPG signing, we need to figure out if we
-        # are using a personal key or the automatically generated one.
-        if conf['gpg']['mygpghome'] != '':
-            gpghome = conf['gpg']['mygpghome']
-        else:
-            gpghome = conf['build']['dlpath'] + '/.gnupg'
-        if conf['gpg']['mygpgkey'] != '':
-            keyid = conf['gpg']['mygpgkey']
-        else:
-            keyid = False
-        # We want to kill off any stale gpg-agents so we spawn a new one.
-        killStaleAgent()
-        ## HERE BE DRAGONS. Converting to PyGPGME...
-        # List of Key instances used for signing with sign() and encrypt_sign().
-        gpg = gpgme.Context()
-        if keyid:
-            gpg.signers = gpg.get_key(keyid)
-        else:
-            # Try to "guess" the key ID.
-            # If we got here, it means we generated a key earlier during the tarball download...
-            # So we can use that!
-            pass
-        # And if we didn't specify one manually, we'll pick the first one we find.
-        # This way we can use the automatically generated one from prep.
-        if not keyid:
-            keyid = gpg.list_keys(True)[0]['keyid']
-        print('{0}: [BUILD] Signing {1} with {2}...'.format(
-            datetime.datetime.now(),
-            path,
-            keyid))
-        # TODO: remove this warning when upstream python-gnupg fixes
-        print('\t\t\t If you see a "ValueError: Unknown status message: \'KEY_CONSIDERED\'" error, ' +
-              'it can be safely ignored.')
-        print('\t\t\t If this is taking a VERY LONG time, try installing haveged and starting it. ' +
-              'This can be done safely in parallel with the build process.')
-        data_in = open(path, 'rb')
-        gpg.sign_file(data_in, keyid = keyid, detach = True,
-                      clearsign = False, output = '{0}.sig'.format(path))
+        # Do we want to kill off any stale gpg-agents? (So we spawn a new one)
+        # Requires further testing.
+        #killStaleAgent()
+        gpg = conf['gpgobj']
+        print('{0}: [GPG] Signing {1}...'.format(
+            datetime.datetime.now(),
+            path))
+        # May not be necessary; further testing necessary
+        #if os.getenv('GPG_AGENT_INFO'):
+        #    del os.environ['GPG_AGENT_INFO']
+        # ASCII-armor (.asc)
+        gpg.armor = True
+        data_in = open(path, 'rb')
+        sigbuf = BytesIO()
+        sig = gpg.sign(data_in, sigbuf, gpgme.SIG_MODE_DETACH)
+        _ = sigbuf.seek(0)
+        _ = data_in.seek(0)
+        data_in.close()
+        with open('{0}.asc'.format(path), 'wb') as f:
+            f.write(sigbuf.read())
+        print('{0}: [GPG] Wrote {1}.asc (ASCII-armored signature).'.format(
+            datetime.datetime.now(),
+            path))
+        # Binary signature (.sig)
+        gpg.armor = False
+        data_in = open(path, 'rb')
+        sigbuf = BytesIO()
+        sig = gpg.sign(data_in, sigbuf, gpgme.SIG_MODE_DETACH)
+        _ = sigbuf.seek(0)
+        _ = data_in.seek(0)
+        data_in.close()
+        with open('{0}.sig'.format(path), 'wb') as f:
+            f.write(sigbuf.read())
+        print('{0}: [GPG] Wrote {1}.sig (binary signature).'.format(
+            datetime.datetime.now(),
+            path))
 
 def gpgVerify(sigfile, datafile, conf):
-    pass
+    gpg = conf['gpgobj']
+    fullkeys = []
+    print('{0}: [GPG] Verifying {1} with {2}...'.format(
+        datetime.datetime.now(),
+        datafile,
+        sigfile))
+    keylst = gpg.keylist()
+    for k in keylst:
+        fullkeys.append(k.subkeys[0].fpr)
+    with open(sigfile,'rb') as s:
+        with open(datafile, 'rb') as f:
+            sig = gpg.verify(s, f, None)
+    for x in sig:
+        if x.validity <= 1:
+            if not x.validity_reason:
+                reason = 'we require a signature trust of 2 or higher'
+            else:
+                reason = x.validity_reason
+            print('{0}: [GPG] Key {1} failed to verify: {2}'.format(
+                datetime.datetime.now(),
+                x.fpr,
+                reason))
+    verified = False
+    skeys = []
+    for k in sig:
+        skeys.append(k.fpr)
+        if k.fpr in fullkeys:
+            verified = True
+            break
+        else:
+            pass
+    if verified:
+        print('{0}: [GPG] {1} verified (success).'.format(
+            datetime.datetime.now(),
+            datafile))
+    else:
+        print('{0}: [GPG] {1} failed verification!'.format(
+            datetime.datetime.now(),
+            datafile))
+    return(verified)
 
 def delTempKeys(conf):
-    pass
+    # Create a config option to delete these.
+    # It's handy to keep these keys, but I'd understand if
+    # people didn't want to use them.
+    gpg = conf['gpgobj']
+    if conf['gpg']:
+        keys = []
+        if conf['gpgkey'] != '':
+            keys.append(gpg.get_key(conf['gpgkey']))
+        if conf['mygpghome'] == '':
+            keys.append(gpg.get_key(None, True)) # this is safe; we generated our own
+        for k in keys:
+            gpg.delete(k)
+    killStaleAgent(conf)
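
Taken together, the reworked bGPG surface is meant to be driven roughly like this (a sketch; conf is the parsed config dict and the ISO path is illustrative):

    import bGPG  # bdisk.bGPG

    conf['gpgobj'] = bGPG.genGPG(conf)            # one shared gpgme.Context for the whole run
    bGPG.signIMG('/tmp/bdisk/out.iso', conf)      # writes out.iso.asc and out.iso.sig next to it
    ok = bGPG.gpgVerify('/tmp/bdisk/out.iso.sig',
                        '/tmp/bdisk/out.iso', conf)
    bGPG.delTempKeys(conf)                        # optionally drop the generated keys afterward
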
@@ -18,9 +18,10 @@ if __name__ == '__main__':
     conf = host.parseConfig(host.getConfig())[1]
     prep.dirChk(conf)
+    conf['gpgobj'] = bGPG.genGPG(conf)
-    prep.buildChroot(conf['build'], keep = False)
-    prep.prepChroot(conf['build'], conf['bdisk'], conf['user'])
+    prep.buildChroot(conf, keep = False)
+    prep.prepChroot(conf)
     arch = conf['build']['arch']
     #bGPG.killStaleAgent(conf)
     for a in arch:
         bchroot.chroot(conf['build']['chrootdir'] + '/root.' + a, 'bdisk.square-r00t.net')
         bchroot.chrootUnmount(conf['build']['chrootdir'] + '/root.' + a)
@@ -29,7 +30,7 @@ if __name__ == '__main__':
         build.genImg(conf)
         build.genUEFI(conf['build'], conf['bdisk'])
         fulliso = build.genISO(conf)
-        build.signIMG(fulliso['Main']['file'], conf)
+        bGPG.signIMG(fulliso['Main']['file'], conf)
         build.displayStats(fulliso)
     if conf['build']['ipxe']:
         bSSL.sslPKI(conf)
@@ -39,7 +40,7 @@ if __name__ == '__main__':
     for x in iso.keys():
         if x != 'name':
             path = iso[x]['file']
-            build.signIMG(path, conf)
+            bGPG.signIMG(path, conf)
     build.displayStats(iso)
     bsync.http(conf)
     bsync.tftp(conf)
@@ -10,7 +10,7 @@ import subprocess
 def http(conf):
     http = conf['http']
     build = conf['build']
-    tempdir = build['tempdir']
+    prepdir = build['prepdir']
     arch = build['arch']
     bdisk = conf['bdisk']
     if conf['sync']['http']:
@@ -48,7 +48,7 @@ def http(conf):
             fulldest = '{0}/{1}'.format(httpdir, destpath)
             parentdir = os.path.split(fulldest)[0]
             os.makedirs(parentdir, exist_ok = True)
-            shutil.copy2('{0}/{1}'.format(tempdir, k), '{0}/{1}'.format(httpdir, httpfiles[k]))
+            shutil.copy2('{0}/{1}'.format(prepdir, k), '{0}/{1}'.format(httpdir, httpfiles[k]))
         for root, dirs, files in os.walk(httpdir):
             for d in dirs:
                 os.chown(os.path.join(root, d), uid, gid)
@@ -59,7 +59,7 @@ def tftp(conf):
     # TODO: pxelinux cfg
     tftp = conf['tftp']
     build = conf['build']
-    tempdir = build['tempdir']
+    prepdir = build['prepdir']
     arch = build['arch']
     bdisk = conf['bdisk']
     if conf['sync']['tftp']:
@@ -96,7 +96,7 @@ def tftp(conf):
             fulldest = '{0}/{1}'.format(tftpdir, destpath)
             parentdir = os.path.split(fulldest)[0]
             os.makedirs(parentdir, exist_ok = True)
-            shutil.copy2('{0}/{1}'.format(tempdir, k), '{0}/{1}'.format(tftpdir, tftpfiles[k]))
+            shutil.copy2('{0}/{1}'.format(prepdir, k), '{0}/{1}'.format(tftpdir, tftpfiles[k]))
         for root, dirs, files in os.walk(tftpdir):
             for d in dirs:
                 os.chown(os.path.join(root, d), uid, gid)
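
Both sync targets end with the same recursive-chown walk, since os.chown() does not recurse on its own. Distilled (a sketch; the path and IDs are placeholders):

    import os

    def chown_tree(root_dir, uid, gid):
        os.chown(root_dir, uid, gid)               # the top directory itself
        for root, dirs, files in os.walk(root_dir):
            for name in dirs + files:              # every child, dirs and files alike
                os.chown(os.path.join(root, name), uid, gid)

    chown_tree('/srv/tftp/bdisk', 1000, 1000)
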
@@ -121,7 +121,7 @@ def rsync(conf):
     # and do nothing if http- copying over three copies of the squashed filesystems
     # is a waste of time, bandwidth, and disk space on target.
     build = conf['build']
-    tempdir = build['tempdir']
+    prepdir = build['prepdir']
     isodir = build['isodir']
     arch = build['arch']
     rsync = conf['rsync']
@@ -159,7 +159,7 @@ def rsync(conf):
                 cmd[4],
                 server))
             subprocess.call(cmd)
-        cmd[4] = '{0}/boot'.format(build['tempdir'])
+        cmd[4] = '{0}/boot'.format(build['prepdir'])
         subprocess.call(cmd)
     if conf['rsync']['iso']:
         cmd[4] = isodir
@@ -170,7 +170,7 @@ def rsync(conf):
             subprocess.call(cmd)
     # Now we copy some extra files.
     prebuild_dir = '{0}/extra/pre-build.d'.format(build['basedir'])
-    rsync_files = ['{0}/VERSION_INFO.txt'.format(tempdir),
+    rsync_files = ['{0}/VERSION_INFO.txt'.format(prepdir),
                    '{0}/root/packages.both'.format(prebuild_dir),
                    '{0}/root/iso.pkgs.both'.format(prebuild_dir)]
     for x in rsync_files:
@@ -8,6 +8,7 @@ import psutil
 import jinja2
 import humanize
 import datetime
+import bGPG # bdisk.bGPG
 from urllib.request import urlopen
 
 
@@ -18,7 +19,7 @@ def genImg(conf):
     chrootdir = build['chrootdir']
     archboot = build['archboot']
     basedir = build['basedir']
-    tempdir = build['tempdir']
+    prepdir = build['prepdir']
     hashes = {}
     hashes['sha256'] = {}
     hashes['md5'] = {}
@@ -71,11 +72,11 @@ def genImg(conf):
             squashfses.append('{0}'.format(squashimg))
     print("{0}: [BUILD] Hash checksums complete.".format(datetime.datetime.now()))
     # Logo
-    os.makedirs(tempdir + '/boot', exist_ok = True)
+    os.makedirs(prepdir + '/boot', exist_ok = True)
     if not os.path.isfile('{0}/extra/{1}.png'.format(basedir, bdisk['uxname'])):
-        shutil.copy2(basedir + '/extra/bdisk.png', '{0}/{1}.png'.format(tempdir, bdisk['uxname']))
+        shutil.copy2(basedir + '/extra/bdisk.png', '{0}/{1}.png'.format(prepdir, bdisk['uxname']))
     else:
-        shutil.copy2(basedir + '/extra/{0}.png'.format(bdisk['uxname']), '{0}/{1}.png'.format(tempdir, bdisk['uxname']))
+        shutil.copy2(basedir + '/extra/{0}.png'.format(bdisk['uxname']), '{0}/{1}.png'.format(prepdir, bdisk['uxname']))
     # Kernels, initrds...
     # We use a dict here so we can use the right filenames...
     # I might change how I handle this in the future.
@@ -83,9 +84,9 @@ def genImg(conf):
         bootfiles['kernel'] = ['vmlinuz-linux-' + bdisk['name'], '{0}.{1}.kern'.format(bdisk['uxname'], bitness)]
         bootfiles['initrd'] = ['initramfs-linux-{0}.img'.format(bdisk['name']), '{0}.{1}.img'.format(bdisk['uxname'], bitness)]
         for x in ('kernel', 'initrd'):
-            shutil.copy2('{0}/root.{1}/boot/{2}'.format(chrootdir, a, bootfiles[x][0]), '{0}/boot/{1}'.format(tempdir, bootfiles[x][1]))
+            shutil.copy2('{0}/root.{1}/boot/{2}'.format(chrootdir, a, bootfiles[x][0]), '{0}/boot/{1}'.format(prepdir, bootfiles[x][1]))
     for i in squashfses:
-        signIMG(i, conf)
+        bGPG.signIMG(i, conf)
 
 
 def genUEFI(build, bdisk):
@@ -95,19 +96,20 @@ def genUEFI(build, bdisk):
     # Plus there's always multiarch.
     # I can probably do this better with a dict... TODO.
     if 'x86_64' in arch:
-        tempdir = build['tempdir']
+        prepdir = build['prepdir']
         basedir = build['basedir']
         chrootdir = build['chrootdir']
         mountpt = build['mountpt']
         templates_dir = build['basedir'] + '/extra/templates'
-        efidir = '{0}/EFI/{1}'.format(tempdir, bdisk['name'])
+        efidir = '{0}/EFI/{1}'.format(prepdir, bdisk['name'])
         os.makedirs(efidir, exist_ok = True)
         efiboot_img = efidir + '/efiboot.img'
-        os.makedirs(tempdir + '/EFI/boot', exist_ok = True)
+        os.makedirs(prepdir + '/EFI/boot', exist_ok = True)
+        os.makedirs(efidir, exist_ok = True)
         ## Download the EFI shells if we don't have them.
         # For UEFI 2.3+ (http://sourceforge.net/apps/mediawiki/tianocore/index.php?title=UEFI_Shell)
-        if not os.path.isfile(tempdir + '/EFI/shellx64_v2.efi'):
-            shell2_path = tempdir + '/EFI/shellx64_v2.efi'
+        if not os.path.isfile(prepdir + '/EFI/shellx64_v2.efi'):
+            shell2_path = prepdir + '/EFI/shellx64_v2.efi'
             print("{0}: [BUILD] Warning: You are missing {1}. Fetching...".format(datetime.datetime.now(), shell2_path))
             shell2_url = 'https://raw.githubusercontent.com/tianocore/edk2/master/ShellBinPkg/UefiShell/X64/Shell.efi'
             shell2_fetch = urlopen(shell2_url)
@@ -116,8 +118,8 @@ def genUEFI(build, bdisk):
             shell2_fetch.close()
         # Shell for older versions (http://sourceforge.net/apps/mediawiki/tianocore/index.php?title=Efi-shell)
         # TODO: is there an Arch package for this? can we just install that in the chroot and copy the shell binaries?
-        if not os.path.isfile(tempdir + '/EFI/shellx64_v1.efi'):
-            shell1_path = tempdir + '/EFI/shellx64_v1.efi'
+        if not os.path.isfile(prepdir + '/EFI/shellx64_v1.efi'):
+            shell1_path = prepdir + '/EFI/shellx64_v1.efi'
             print("{0}: [BUILD] Warning: You are missing {1}. Fetching...".format(datetime.datetime.now(), shell1_path))
             shell1_url = 'https://raw.githubusercontent.com/tianocore/edk2/master/EdkShellBinPkg/FullShell/X64/Shell_Full.efi'
             shell1_fetch = urlopen(shell1_url)
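
Both shell fetches are the same urlopen-to-file idiom; distilled (a sketch — the URL is the one in this hunk, the destination path is illustrative):

    from urllib.request import urlopen

    url = 'https://raw.githubusercontent.com/tianocore/edk2/master/ShellBinPkg/UefiShell/X64/Shell.efi'
    fetch = urlopen(url)
    with open('/tmp/shellx64_v2.efi', 'wb+') as dl:
        dl.write(fetch.read())                     # fine for small binaries like these
    fetch.close()
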
@@ -133,20 +135,20 @@ def genUEFI(build, bdisk):
                 fname = 'bootx64.efi'
             else:
                 fname = f
-            if not os.path.isfile(tempdir + '/EFI/boot/' + fname):
+            if not os.path.isfile(prepdir + '/EFI/boot/' + fname):
                 url = shim_url + f
                 url_fetch = urlopen(url)
-                with open(tempdir + '/EFI/boot/' + fname, 'wb+') as dl:
+                with open(prepdir + '/EFI/boot/' + fname, 'wb+') as dl:
                     dl.write(url_fetch.read())
                 url_fetch.close()
         # And we also need the systemd efi bootloader.
-        if os.path.isfile(tempdir + '/EFI/boot/loader.efi'):
-            os.remove(tempdir + '/EFI/boot/loader.efi')
-        shutil.copy2(chrootdir + '/root.x86_64/usr/lib/systemd/boot/efi/systemd-bootx64.efi', tempdir + '/EFI/boot/loader.efi')
+        if os.path.isfile(prepdir + '/EFI/boot/loader.efi'):
+            os.remove(prepdir + '/EFI/boot/loader.efi')
+        shutil.copy2(chrootdir + '/root.x86_64/usr/lib/systemd/boot/efi/systemd-bootx64.efi', prepdir + '/EFI/boot/loader.efi')
         # And the accompanying configs for the systemd efi bootloader, too.
         tpl_loader = jinja2.FileSystemLoader(templates_dir)
         env = jinja2.Environment(loader = tpl_loader)
-        os.makedirs(tempdir + '/loader/entries', exist_ok = True)
+        os.makedirs(prepdir + '/loader/entries', exist_ok = True)
         for t in ('loader', 'ram', 'base', 'uefi2', 'uefi1'):
             if t == 'base':
                 fname = bdisk['uxname'] + '.conf'
@@ -155,10 +157,10 @@ def genUEFI(build, bdisk):
             else:
                 fname = bdisk['uxname'] + '_' + t + '.conf'
             if t == 'loader':
-                tplpath = tempdir + '/loader/'
+                tplpath = prepdir + '/loader/'
                 fname = 'loader.conf' # we change the var from above because it's an oddball.
             else:
-                tplpath = tempdir + '/loader/entries/'
+                tplpath = prepdir + '/loader/entries/'
             tpl = env.get_template('EFI/' + t + '.conf.j2')
             tpl_out = tpl.render(build = build, bdisk = bdisk)
             with open(tplpath + fname, "w+") as f:
@@ -176,9 +178,9 @@ def genUEFI(build, bdisk):
                      '/EFI/shellx64_v1.efi',
                      '/EFI/shellx64_v2.efi']
         for i in sizefiles:
-            sizetotal += os.path.getsize(tempdir + i)
+            sizetotal += os.path.getsize(prepdir + i)
         # Loader configs
-        for (path, dirs, files) in os.walk(tempdir + '/loader/'):
+        for (path, dirs, files) in os.walk(prepdir + '/loader/'):
             for file in files:
                 fname = os.path.join(path, file)
                 sizetotal += os.path.getsize(fname)
@@ -223,13 +225,13 @@ def genUEFI(build, bdisk):
         with open(tplpath + fname, "w+") as f:
             f.write(tpl_out)
         for x in ('bootx64.efi', 'HashTool.efi', 'loader.efi'):
-            y = tempdir + '/EFI/boot/' + x
+            y = prepdir + '/EFI/boot/' + x
             z = mountpt + '/EFI/boot/' + x
             if os.path.isfile(z):
                 os.remove(z)
             shutil.copy(y, z)
         for x in ('shellx64_v1.efi', 'shellx64_v2.efi'):
-            y = tempdir + '/EFI/' + x
+            y = prepdir + '/EFI/' + x
             z = mountpt + '/EFI/' + x
             if os.path.isfile(z):
                 os.remove(z)
@@ -253,16 +255,16 @@ def genISO(conf):
     build = conf['build']
     bdisk = conf['bdisk']
     archboot = build['archboot']
-    tempdir = build['tempdir']
+    prepdir = build['prepdir']
     templates_dir = build['basedir'] + '/extra/templates'
     arch = build['arch']
-    builddir = tempdir + '/' + bdisk['name']
+    builddir = prepdir + '/' + bdisk['name']
     extradir = build['basedir'] + '/extra/'
     # arch[0] is safe to use, even if multiarch, because the only cases when it'd be ambiguous
     # is when x86_64 is specifically set to [0]. See host.py's parseConfig().
     # TODO: can we use syslinux for EFI too instead of prebootloader?
     syslinuxdir = build['chrootdir'] + '/root.' + arch[0] + '/usr/lib/syslinux/bios/'
-    sysl_tmp = tempdir + '/isolinux/'
+    sysl_tmp = prepdir + '/isolinux/'
     ver = bdisk['ver']
     if len(arch) == 1:
         isofile = '{0}-{1}-{2}-{3}.iso'.format(bdisk['uxname'], bdisk['ver'], build['buildnum'], arch[0])
@@ -285,7 +287,7 @@ def genISO(conf):
         efi = True
     if os.path.isfile(isopath):
         os.remove(isopath)
-    if archboot != tempdir + '/' + bdisk['name']: # best to use static concat here...
+    if archboot != prepdir + '/' + bdisk['name']: # best to use static concat here...
         if os.path.isdir(builddir):
             shutil.rmtree(builddir, ignore_errors = True)
         shutil.copytree(archboot, builddir)
@@ -348,7 +350,7 @@ def genISO(conf):
                '-no-emul-boot',
                '-isohybrid-gpt-basdat',
                '-output', isopath,
-               tempdir]
+               prepdir]
     else:
         # UNTESTED. TODO.
         # I think i want to also get rid of: -boot-load-size 4,
@@ -371,7 +373,7 @@ def genISO(conf):
                '-no-emul-boot',
                '-isohybrid-gpt-basdat',
                '-output', isopath,
-               tempdir]
+               prepdir]
     DEVNULL = open(os.devnull, 'w')
     subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
     # Get size of ISO
@@ -400,5 +402,5 @@ def displayStats(iso):
         print('\t\t\t = Location: {0}'.format(iso[i]['file']))
 
 def cleanUp():
-    # TODO: clear out all of tempdir?
+    # TODO: clear out all of prepdir?
     pass
@@ -169,7 +169,7 @@ def parseConfig(confs):
             datetime.datetime.now(),
             config_dict['build']['basedir']))
     # Make dirs if they don't exist
-    for d in ('archboot', 'isodir', 'mountpt', 'srcdir', 'tempdir'):
+    for d in ('archboot', 'isodir', 'mountpt', 'srcdir', 'prepdir'):
         os.makedirs(config_dict['build'][d], exist_ok = True)
     # Make dirs for sync staging if we need to
     for x in ('http', 'tftp'):
@@ -15,7 +15,7 @@ def buildIPXE(conf):
     bdisk = conf['bdisk']
     ipxe = conf['ipxe']
     mini = ipxe['iso']
-    tempdir = conf['build']['tempdir']
+    prepdir = conf['build']['prepdir']
     templates_dir = build['basedir'] + '/extra/templates'
     ipxe_tpl = templates_dir + '/iPXE'
     srcdir = build['srcdir']
@@ -102,11 +102,12 @@ def genISO(conf):
     bdisk = conf['bdisk']
     ipxe = conf['ipxe']
     arch = build['arch']
+    dlpath = build['dlpath']
     ver = bdisk['ver']
     isodir = build['isodir']
     isofile = '{0}-{1}-{2}.mini.iso'.format(bdisk['uxname'], bdisk['ver'], build['buildnum'])
     isopath = '{0}/{1}'.format(isodir, isofile)
-    tempdir = build['tempdir']
+    prepdir = build['prepdir']
     chrootdir = build['chrootdir']
     mini = ipxe['iso']
     iso = {}
@@ -116,8 +117,8 @@ def genISO(conf):
     templates_dir = build['basedir'] + '/extra/templates/iPXE/'
     tpl_loader = jinja2.FileSystemLoader(templates_dir)
     env = jinja2.Environment(loader = tpl_loader)
-    bootdir = tempdir + '/ipxe_mini'
-    efiboot_img = bootdir + '/EFI/BOOT/mini.efi'
+    bootdir = '{0}/ipxe_mini'.format(dlpath)
+    efiboot_img = '{0}/EFI/BOOT/mini.efi'.format(bootdir)
     innerefi64 = '{0}/src/bin-x86_64-efi/ipxe.efi'.format(ipxe_src)
     efi = False
     # this shouldn't be necessary... if it is, we can revisit this in the future. see "Inner dir" below.
@@ -130,7 +131,7 @@ def genISO(conf):
     if os.path.isdir(bootdir):
         shutil.rmtree(bootdir)
     os.makedirs('{0}/EFI/BOOT'.format(bootdir), exist_ok = True) # EFI
-    # Inner dir (efiboot.efi file)
+    # Inner dir (mini.efi file)
     sizetotal = 65536 # 64K wiggle room. increase this if we add IA64.
     sizetotal += os.path.getsize(innerefi64)
     print("{0}: [IPXE] Creating EFI ESP image {1} ({2})...".format(
@@ -155,7 +156,7 @@ def genISO(conf):
     os.makedirs('{0}/loader/entries'.format(bootdir), exist_ok = True) # EFI
     os.makedirs('{0}/isolinux'.format(bootdir), exist_ok = True) # BIOS
     # we reuse the preloader.efi from full ISO build
-    shutil.copy2('{0}/EFI/boot/bootx64.efi'.format(tempdir),
+    shutil.copy2('{0}/EFI/boot/bootx64.efi'.format(prepdir),
                  '{0}/EFI/BOOT/BOOTX64.EFI'.format(bootdir))
     # and we create the loader entries
     for t in ('loader','base'):
@@ -197,7 +198,7 @@ def genISO(conf):
            '-boot-info-table',
            '-isohybrid-mbr', '{0}/root.{1}/usr/lib/syslinux/bios/isohdpfx.bin'.format(chrootdir, arch[0]),
            '-eltorito-alt-boot',
-           '-e', 'efiboot.efi',
+           '-e', 'EFI/BOOT/mini.efi',
            '-no-emul-boot',
            '-isohybrid-gpt-basdat',
            '-output', isopath,
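
The '-e' flag names the El Torito alternate (EFI) boot image by its path inside the ISO tree, so it has to match where mini.efi actually lands now. Assembled from the flags visible in this hunk, the invocation looks roughly like this (a sketch; the '-b' BIOS image name and all paths are assumptions, not taken from this diff):

    import subprocess

    cmd = ['/usr/bin/xorriso', '-as', 'mkisofs',
           '-b', 'isolinux/isolinux.bin',          # BIOS boot image (assumed name)
           '-no-emul-boot', '-boot-load-size', '4', '-boot-info-table',
           '-isohybrid-mbr', '/usr/lib/syslinux/bios/isohdpfx.bin',
           '-eltorito-alt-boot',
           '-e', 'EFI/BOOT/mini.efi',              # path inside the ISO, per this hunk
           '-no-emul-boot', '-isohybrid-gpt-basdat',
           '-output', '/tmp/mini.iso',
           '/tmp/ipxe_mini']                       # the staged bootdir
    subprocess.call(cmd)
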
@@ -2,7 +2,6 @@ import os
 import shutil
 import re
 import hashlib
 import gpgme
 import tarfile
 import subprocess
-import re
@@ -11,18 +10,20 @@ import datetime
 import humanize
 from urllib.request import urlopen
 import host # bdisk.host
+import bGPG # bdisk.bGPG
 
 
 def dirChk(config_dict):
     # Make dirs if they don't exist
-    for d in ('archboot', 'isodir', 'mountpt', 'srcdir', 'tempdir'):
+    for d in ('archboot', 'isodir', 'mountpt', 'srcdir', 'prepdir'):
         os.makedirs(config_dict['build'][d], exist_ok = True)
     # Make dirs for sync staging if we need to
     for x in ('http', 'tftp'):
         if config_dict['sync'][x]:
             os.makedirs(config_dict[x]['path'], exist_ok = True)
 
-def downloadTarball(build):
+def downloadTarball(conf):
+    build = conf['build']
     dlpath = build['dlpath']
     arch = build['arch']
     #mirror = 'http://mirrors.kernel.org/archlinux'
@@ -41,9 +42,6 @@ def downloadTarball(build):
     sha_list = list(filter(None, sha_raw.split('\n')))
     sha_dict = {x.split()[1]: x.split()[0] for x in sha_list}
     # all that lousy work just to get a sha1 sum. okay. so.
-    if build['mirrorgpgsig'] != '':
-        # we don't want to futz with the user's normal gpg.
-        gpg = gnupg.GPG(gnupghome = dlpath + '/.gnupg')
     for a in arch:
         pattern = re.compile('^.*' + a + '\.tar(\.(gz|bz2|xz))?$')
         tarball = [filename.group(0) for l in list(sha_dict.keys()) for filename in [pattern.search(l)] if filename][0]
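
The two-line comprehension above turns a raw sha1sums.txt into a {filename: hash} dict; a worked example with invented sample data:

    sha_raw = '0123abcd  archlinux-bootstrap-2017.01.01-x86_64.tar.gz\n4567ef01  archlinux-bootstrap-2017.01.01-i686.tar.gz\n'
    sha_list = list(filter(None, sha_raw.split('\n')))          # drops the trailing empty string
    sha_dict = {x.split()[1]: x.split()[0] for x in sha_list}
    # -> {'archlinux-bootstrap-2017.01.01-x86_64.tar.gz': '0123abcd',
    #     'archlinux-bootstrap-2017.01.01-i686.tar.gz': '4567ef01'}
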
@@ -114,12 +112,15 @@ def unpackTarball(tarball_path, build, keep = False):
         tar.close()
         print("{0}: [PREP] Extraction for {1} finished.".format(datetime.datetime.now(), tarball_path[a]))
 
-def buildChroot(build, keep = False):
+def buildChroot(conf, keep = False):
+    build = conf['build']
+    bdisk = conf['bdisk']
+    user = conf['user']
     dlpath = build['dlpath']
     chrootdir = build['chrootdir']
     arch = build['arch']
     extradir = build['basedir'] + '/extra'
-    unpack_me = unpackTarball(downloadTarball(build), build, keep)
+    unpack_me = unpackTarball(downloadTarball(conf), build, keep)
     # build dict of lists of files and dirs from pre-build.d dir, do the same with arch-specific changes.
     prebuild_overlay = {}
     prebuild_arch_overlay = {}
@@ -158,9 +159,12 @@ def buildChroot(build, keep = False):
             shutil.copy2(extradir + '/pre-build.d/' + a + '/' + file, chrootdir + '/root.' + a + '/' + file, follow_symlinks = False)
             os.chown(chrootdir + '/root.' + a + '/' + file, 0, 0, follow_symlinks = False)
 
-def prepChroot(build, bdisk, user):
+def prepChroot(conf):
+    build = conf['build']
+    bdisk = conf['bdisk']
+    user = conf['user']
     chrootdir = build['chrootdir']
-    tempdir = build['tempdir']
+    prepdir = build['prepdir']
     arch = build['arch']
     bdisk_repo_dir = build['basedir']
     dlpath = build['dlpath']
@@ -185,7 +189,7 @@ def prepChroot(build, bdisk, user):
     for a in arch:
         with open('{0}/root.{1}/root/VERSION_INFO.txt'.format(chrootdir, a), 'w+') as f:
             f.write(tpl_out)
-    with open(tempdir + '/VERSION_INFO.txt', 'w+') as f:
+    with open(prepdir + '/VERSION_INFO.txt', 'w+') as f:
         f.write(tpl_out)
     tpl = env.get_template('VARS.txt.j2')
     tpl_out = tpl.render(bdisk = bdisk, user = user)
|