Successful build; UEFI and BIOS are working. Still need to test the USB method.
parent 7381cc3d39
commit 4b4cbd0f63

.gitignore (vendored) | 25

@@ -1,26 +1,19 @@
-# We don't want local build settings
-/build.conf
+# We don't want local build settings in case someone's using
+# the git dir as a place to store their build.ini
+/build.ini
 
-# The chroots should be generated locally ONLY. The perms/ownership would get futzed up anyways if checked into git.
+# The default doesn't store these in the git working dir,
+# but better safe than sorry.
 /root.x86_64
 /root.i686
 
-# We don't want the copied/stripped/compressed chroots
-# might not be relevant anymore in the python rewrite
-/build64
-/build32
-
-# We don't need these in git. They should be generated dynamically.
 /http
 /iso
 /temp
-/TMPBOOT
 /tftpboot
-/.latest.x86_64.tar.gz
-/.latest.i686.tar.gz
-/lockfile.lck
-/VERSION_INFO.txt
-/BUILDNO
+# We don't need these in git. They should be generated dynamically.
+.latest.*.tar
+/buildnum
 /screenlog*
 /logs
 *.swp

@@ -1,15 +1,6 @@
-# NOTE: this is almost taken verbatim from https://github.com/pkgcore/pychroot's
-# pychroot/scripts/pychroot.py because the pychroot.Chroot method isn't really
-# documented very well
-
-#from __future__ import absolute_import, unicode_literals
-
-#from functools import partial
 import os
 import sys
 import psutil
-#from pychroot.base import Chroot
-import pychroot
 import subprocess
 import ctypes
 
@@ -20,3 +20,6 @@ if __name__ == '__main__':
     bchroot.chroot(conf['build']['chrootdir'] + '/root.' + a, 'bdisk.square-r00t.net')
     bchroot.chrootUnmount(conf['build']['chrootdir'] + '/root.' + a)
     prep.postChroot(conf['build'])
+    build.genImg(conf['build'], conf['bdisk'])
+    build.genUEFI(conf['build'], conf['bdisk'])
+    build.genISO(conf)

bdisk/build.py | 172
@@ -8,26 +8,35 @@ import jinja2
 from urllib.request import urlopen
 
 
-def genImg(build):
+def genImg(build, bdisk):
     arch = build['arch']
     chrootdir = build['chrootdir']
     archboot = build['archboot']
+    basedir = build['basedir']
+    tempdir = build['tempdir']
     hashes = {}
     hashes['sha256'] = {}
     hashes['md5'] = {}
     for a in arch:
+        if a == 'i686':
+            bitness = '32'
+        elif a == 'x86_64':
+            bitness = '64'
         # Create the squashfs image
         airoot = archboot + '/' + a + '/'
         squashimg = airoot + 'airootfs.sfs'
         os.makedirs(airoot, exist_ok = True)
-        print("Generating squashed filesystem image for {0}. Please wait...".format(chrootdir))
-        cmd = ['/usr/bin/squashfs-tools', chrootdir, squashimg, '-noappend', '-comp', 'xz']
-        proc = subprocess.Popen(cmd, stdout = subprocess.PIPE, bufsize = 1)
-        #for line in iter(proc.stdout.readline, b''):
-        for line in iter(proc.stdout.readline, ''):
-            print(line)
-        p.stdout.close()
-        p.wait()
+        print("Generating squashed filesystem image for {0}. Please wait...".format(chrootdir + '/root.' + a))
+        # TODO: use stdout and -progress if debugging is enabled. the below subprocess.call() just redirects to
+        # /dev/null.
+        DEVNULL = open(os.devnull, 'w')
+        cmd = ['/usr/bin/mksquashfs',
+                chrootdir + '/root.' + a,
+                squashimg,
+                '-no-progress',
+                '-noappend',
+                '-comp', 'xz']
+        subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
         # Generate the checksum files
         print("Now generating SHA256 and MD5 hash checksum files for {0}. Please wait...".format(squashimg))
         hashes['sha256'][a] = hashlib.sha256()
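
For reference, the new mksquashfs call above reduces to a standalone sketch like the following; the chroot and output paths are placeholders rather than values from the commit's config, and /usr/bin/mksquashfs must be provided by squashfs-tools.

import os
import subprocess

chroot = '/var/tmp/chroot/root.x86_64'   # hypothetical chroot path
squashimg = '/var/tmp/airootfs.sfs'      # hypothetical output image

# Discard mksquashfs output, matching the subprocess.call() redirection above.
with open(os.devnull, 'w') as devnull:
    cmd = ['/usr/bin/mksquashfs', chroot, squashimg,
           '-no-progress', '-noappend', '-comp', 'xz']
    subprocess.call(cmd, stdout=devnull, stderr=subprocess.STDOUT)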
@@ -44,6 +53,18 @@ def genImg(build):
             f.write("{0} airootfs.sfs".format(hashes['sha256'][a].hexdigest()))
         with open(airoot + 'airootfs.md5', 'w+') as f:
             f.write("{0} airootfs.sfs".format(hashes['md5'][a].hexdigest()))
+        os.makedirs(tempdir + '/boot', exist_ok = True)
+        if not os.path.isfile('{0}/extra/{1}.png'.format(basedir, bdisk['uxname'])):
+            shutil.copy2(basedir + '/extra/bdisk.png', '{0}/{1}.png'.format(tempdir, bdisk['uxname']))
+        else:
+            shutil.copy2(basedir + '/extra/{0}.png'.format(bdisk['uxname']), '{0}/{1}.png'.format(tempdir, bdisk['uxname']))
+        # We use a dict here so we can use the right filenames...
+        # I might change how I handle this in the future.
+        bootfiles = {}
+        bootfiles['kernel'] = ['vmlinuz-linux-' + bdisk['name'], '{0}.{1}.kern'.format(bdisk['uxname'], bitness)]
+        bootfiles['initrd'] = ['initramfs-linux-{0}.img'.format(bdisk['name']), '{0}.{1}.img'.format(bdisk['uxname'], bitness)]
+        for x in ('kernel', 'initrd'):
+            shutil.copy2('{0}/root.{1}/boot/{2}'.format(chrootdir, a, bootfiles[x][0]), '{0}/boot/{1}'.format(tempdir, bootfiles[x][1]))
 
 
 def genUEFI(build, bdisk):
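
The kernel/initramfs staging added above is a source-name to staged-name mapping; a minimal sketch of the same idea, with placeholder names and paths standing in for the real config values.

import shutil

bdisk = {'name': 'BDISK', 'uxname': 'bdisk'}        # placeholder values
bitness = '64'
chroot_boot = '/var/tmp/chroot/root.x86_64/boot'    # hypothetical source
staging_boot = '/var/tmp/temp/boot'                 # hypothetical destination

bootfiles = {}
bootfiles['kernel'] = ['vmlinuz-linux-' + bdisk['name'], '{0}.{1}.kern'.format(bdisk['uxname'], bitness)]
bootfiles['initrd'] = ['initramfs-linux-{0}.img'.format(bdisk['name']), '{0}.{1}.img'.format(bdisk['uxname'], bitness)]
for x in ('kernel', 'initrd'):
    # copy2 preserves metadata (mtime, permissions) along with the contents
    shutil.copy2('{0}/{1}'.format(chroot_boot, bootfiles[x][0]),
                 '{0}/{1}'.format(staging_boot, bootfiles[x][1]))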
@@ -53,11 +74,15 @@ def genUEFI(build, bdisk):
     # Plus there's always multiarch.
     # I can probably do this better with a dict... TODO.
     if 'x86_64' in arch:
-        os.makedirs(tempdir + '/EFI/boot', exist_ok = True)
         tempdir = build['tempdir']
         basedir = build['basedir']
+        chrootdir = build['chrootdir']
+        mountpt = build['mountpt']
         templates_dir = build['basedir'] + '/extra/templates'
-        efiboot_img = tempdir + '/EFI/' + bdisk['name'] + '/efiboot.img'
+        efidir = '{0}/EFI/{1}'.format(tempdir, bdisk['name'])
+        os.makedirs(efidir, exist_ok = True)
+        efiboot_img = efidir + '/efiboot.img'
+        os.makedirs(tempdir + '/EFI/boot', exist_ok = True)
         ## Download the EFI shells if we don't have them.
         # For UEFI 2.3+ (http://sourceforge.net/apps/mediawiki/tianocore/index.php?title=UEFI_Shell)
         if not os.path.isfile(tempdir + '/EFI/shellx64_v2.efi'):
@@ -82,11 +107,15 @@ def genUEFI(build, bdisk):
         ## But wait! That's not all! We need more binaries.
         # http://blog.hansenpartnership.com/linux-foundation-secure-boot-system-released/
         shim_url = 'http://blog.hansenpartnership.com/wp-uploads/2013/'
-        for f in ('bootx64.efi', 'HashTool.efi'):
-            if not os.path.isfile(tempdir + '/EFI/boot/' + f):
+        for f in ('PreLoader.efi', 'HashTool.efi'):
+            if f == 'PreLoader.efi':
+                fname = 'bootx64.efi'
+            else:
+                fname = f
+            if not os.path.isfile(tempdir + '/EFI/boot/' + fname):
                 url = shim_url + f
                 url_fetch = urlopen(url)
-                with open(tempdir + '/EFI/boot/' + f) as dl:
+                with open(tempdir + '/EFI/boot/' + fname, 'wb+') as dl:
                     dl.write(url_fetch.read())
                 url_fetch.close()
         # And we also need the systemd efi bootloader.
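
The PreLoader/HashTool fetch above is a plain urlopen-to-file download; the same pattern as a standalone sketch, with a placeholder destination (PreLoader.efi is saved as bootx64.efi, as in the new code).

import os
from urllib.request import urlopen

url = 'http://blog.hansenpartnership.com/wp-uploads/2013/PreLoader.efi'
target = '/var/tmp/temp/EFI/boot/bootx64.efi'   # hypothetical destination

os.makedirs(os.path.dirname(target), exist_ok=True)
url_fetch = urlopen(url)
with open(target, 'wb+') as dl:    # binary mode, matching the fix above
    dl.write(url_fetch.read())
url_fetch.close()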
@@ -94,13 +123,13 @@ def genUEFI(build, bdisk):
             os.remove(tempdir + '/EFI/boot/loader.efi')
         shutil.copy2(chrootdir + '/root.x86_64/usr/lib/systemd/boot/efi/systemd-bootx64.efi', tempdir + '/EFI/boot/loader.efi')
         # And the accompanying configs for the systemd efi bootloader, too.
-        loader = jinja2.FileSystemLoader(templates_dir)
-        env = jinja2.Environment(loader = loader)
+        tpl_loader = jinja2.FileSystemLoader(templates_dir)
+        env = jinja2.Environment(loader = tpl_loader)
         os.makedirs(tempdir + '/loader/entries', exist_ok = True)
         for t in ('loader', 'ram', 'base', 'uefi2', 'uefi1'):
             if t == 'base':
                 fname = bdisk['uxname'] + '.conf'
-            elif ('uefi1', 'uefi2') in t:
+            elif t not in ('uefi1', 'uefi2'):
                 fname = t + '.conf'
             else:
                 fname = bdisk['uxname'] + '_' + t + '.conf'
@@ -109,14 +138,17 @@ def genUEFI(build, bdisk):
                 fname = 'loader.conf' # we change the var from above because it's an oddball.
             else:
                 tplpath = tempdir + '/loader/entries/'
-            tpl = env.get_template(t)
+            tpl = env.get_template('EFI/' + t + '.conf.j2')
             tpl_out = tpl.render(build = build, bdisk = bdisk)
             with open(tplpath + fname, "w+") as f:
                 f.write(tpl_out)
         # And we need to get filesizes (in bytes) for everything we need to include in the ESP.
         # This is more important than it looks.
+        #sizetotal = 33553920 # The spec'd EFI binary size (32MB). It's okay to go over this though (and we do)
+        # because xorriso sees it as a filesystem image and adjusts the ISO automagically.
         sizetotal = 786432 # we start with 768KB and add to it for wiggle room
         sizefiles = ['/boot/' + bdisk['uxname'] + '.64.img',
+                    '/boot/' + bdisk['uxname'] + '.64.kern',
                     '/EFI/boot/bootx64.efi',
                     '/EFI/boot/loader.efi',
                     '/EFI/boot/HashTool.efi',
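
The template handling here is standard jinja2: a FileSystemLoader rooted at extra/templates, get_template() with the new 'EFI/<name>.conf.j2' paths, and render() with the build/bdisk dicts. A minimal sketch, with placeholder paths and context values:

import os
import jinja2

templates_dir = '/opt/dev/bdisk/extra/templates'    # hypothetical checkout path
tpl_loader = jinja2.FileSystemLoader(templates_dir)
env = jinja2.Environment(loader=tpl_loader)
tpl = env.get_template('EFI/base.conf.j2')          # i.e. 'EFI/' + t + '.conf.j2' for t = 'base'
tpl_out = tpl.render(build={'ver': '0.0-0'},        # placeholder context
                     bdisk={'name': 'BDISK', 'uxname': 'bdisk', 'pname': 'BDisk'})

outdir = '/var/tmp/temp/loader/entries'             # hypothetical output dir
os.makedirs(outdir, exist_ok=True)
with open(os.path.join(outdir, 'bdisk.conf'), 'w+') as f:
    f.write(tpl_out)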
@@ -133,13 +165,14 @@ def genUEFI(build, bdisk):
         print("Now creating a {0} bytes EFI ESP image at {1}. Please wait...".format(sizetotal, efiboot_img))
         if os.path.isfile(efiboot_img):
             os.remove(efiboot_img)
-        with open(efiboot_img, 'w+') as f:
+        with open(efiboot_img, 'wb+') as f:
             f.truncate(sizetotal)
+        DEVNULL = open(os.devnull, 'w')
         cmd = ['/sbin/mkfs.vfat', '-F', '32', '-n', bdisk['name'] + '_EFI', efiboot_img]
-        subprocess.call(cmd)
+        subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
         cmd = ['/bin/mount', efiboot_img, build['mountpt']]
         subprocess.call(cmd)
-        os.makedirs(build['mountpt'] + '/' + bdisk['name'])
+        os.makedirs('{0}/EFI/{1}'.format(build['mountpt'], bdisk['name']))
         os.makedirs(build['mountpt'] + '/EFI/boot')
         os.makedirs(build['mountpt'] + '/loader/entries')
         # Ready for some deja vu? This is because it uses an embedded version as well for hybrid ISO.
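
Creating the ESP boils down to truncating a file to the target size and formatting it FAT32; a standalone sketch of those two steps (the size matches the commit, the image path and label are placeholders, and mkfs.vfat comes from dosfstools). Mounting the image, as the function then does, additionally requires root.

import os
import subprocess

efiboot_img = '/var/tmp/efiboot.img'   # hypothetical image path
sizetotal = 786432                     # 768KB starting size, as above

with open(efiboot_img, 'wb+') as f:
    f.truncate(sizetotal)              # sparse file of the requested size
with open(os.devnull, 'w') as devnull:
    cmd = ['/sbin/mkfs.vfat', '-F', '32', '-n', 'BDISK_EFI', efiboot_img]
    subprocess.call(cmd, stdout=devnull, stderr=subprocess.STDOUT)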
@@ -148,13 +181,11 @@ def genUEFI(build, bdisk):
         # the templates to use "if efi == 'yes'" instead.
         # function should set the "installation" path for the conf as well based on the value of efi
         # parameter.
-        loader = jinja2.FileSystemLoader(templates_dir)
-        env = jinja2.Environment(loader = loader)
-        os.makedirs(build['mountpt'] + 'loader/entries', exist_ok = True)
+        env = jinja2.Environment(loader = tpl_loader)
         for t in ('loader', 'ram', 'base', 'uefi2', 'uefi1'):
             if t == 'base':
                 fname = bdisk['uxname'] + '.conf'
-            elif ('uefi1', 'uefi2') in t:
+            elif t in ('uefi1', 'uefi2'):
                 fname = t + '.conf'
             else:
                 fname = bdisk['uxname'] + '_' + t + '.conf'
@@ -163,16 +194,31 @@ def genUEFI(build, bdisk):
                 fname = 'loader.conf' # we change the var from above because it's an oddball.
             else:
                 tplpath = build['mountpt'] + '/loader/entries/'
-            tpl = env.get_template(t)
+            tpl = env.get_template('EFI/' + t + '.conf.j2')
             tpl_out = tpl.render(build = build, bdisk = bdisk, efi = 'yes')
             with open(tplpath + fname, "w+") as f:
                 f.write(tpl_out)
-        for x in ('bootx64.efi', 'HashTool.efi', 'loader.efi', 'shellx64_v1.efi', 'shellx64_v2.efi'):
+        for x in ('bootx64.efi', 'HashTool.efi', 'loader.efi'):
             y = tempdir + '/EFI/boot/' + x
             z = mountpt + '/EFI/boot/' + x
             if os.path.isfile(z):
                 os.remove(z)
             shutil.copy(y, z)
+        for x in ('shellx64_v1.efi', 'shellx64_v2.efi'):
+            y = tempdir + '/EFI/' + x
+            z = mountpt + '/EFI/' + x
+            if os.path.isfile(z):
+                os.remove(z)
+            shutil.copy(y, z)
+        shutil.copy2('{0}/root.{1}/boot/vmlinuz-linux-{2}'.format(chrootdir, 'x86_64', bdisk['name']),
+            '{0}/EFI/{1}/{2}.efi'.format(mountpt, bdisk['name'], bdisk['uxname']))
+        shutil.copy2('{0}/root.{1}/boot/initramfs-linux-{2}.img'.format(chrootdir, 'x86_64', bdisk['name']),
+            '{0}/EFI/{1}/{2}.img'.format(mountpt, bdisk['name'], bdisk['uxname']))
+        # TODO: support both Arch's as EFI bootable instead? Maybe? requires more research. very rare.
+        #shutil.copy2('{0}/root.{1}/boot/vmlinuz-linux-{2}'.format(chrootdir, a, bdisk['name']),
+        #    '{0}/EFI/{1}/{2}.{3}.efi'.format(mountpt, bdisk['name'], bdisk['uxname'], bitness))
+        #shutil.copy2('{0}/root.{1}/boot/initramfs-linux-{2}.img'.format(chrootdir, a, bdisk['uxname']),
+        #    '{0}/EFI/{1}/{2}.{3}.img'.format(mountpt, bdisk['name'], bdisk['uxname'], bitness))
         cmd = ['/bin/umount', mountpt]
         subprocess.call(cmd)
         return(efiboot_img)
@@ -182,23 +228,38 @@ def genISO(conf):
     bdisk = conf['bdisk']
     archboot = build['archboot']
     tempdir = build['tempdir']
+    templates_dir = build['basedir'] + '/extra/templates'
+    arch = build['arch']
     builddir = tempdir + '/' + bdisk['name']
     extradir = build['basedir'] + '/extra/'
     # arch[0] is safe to use, even if multiarch, because the only cases when it'd be ambiguous
     # is when x86_64 is specifically set to [0]. See host.py's parseConfig().
-    syslinuxdir = build['chrootdir'] + '/root.' + arch[0] + '/usr/lib/syslinux/'
+    # TODO: can we use syslinux for EFI too instead of prebootloader?
+    syslinuxdir = build['chrootdir'] + '/root.' + arch[0] + '/usr/lib/syslinux/bios/'
     sysl_tmp = tempdir + '/isolinux/'
-    ver = build['ver']
+    ver = bdisk['ver']
     isofile = '{0}-{1}.iso'.format(bdisk['uxname'], bdisk['ver'])
     isopath = build['isodir'] + '/' + isofile
     arch = build['arch']
     # In case we're building a single-arch ISO...
     if len(arch) == 1:
-        isolinux_cfg = extradir + 'templates/BIOS/isolinux.cfg.arch.j2'
+        isolinux_cfg = '/BIOS/isolinux.cfg.arch.j2'
         if arch[0] == 'i686':
             bitness = '32'
+            efi = False
         elif arch[0] == 'x86_64':
             bitness = '64'
+            efi = True
+    else:
+        isolinux_cfg = '/BIOS/isolinux.cfg.multi.j2'
+        bitness = False
+        efi = True
+    if os.path.isfile(isopath):
+        os.remove(isopath)
+    if archboot != tempdir + '/' + bdisk['name']: # best to use static concat here...
+        if os.path.isdir(builddir):
+            shutil.rmtree(builddir, ignore_errors = True)
+        shutil.copytree(archboot, builddir)
     if build['ipxe']:
         ipxe = conf['ipxe']
         if ipxe['iso']:
@@ -207,15 +268,6 @@ def genISO(conf):
         if ipxe['usb']:
             usbfile = '{0}-{1}-mini.usb.img'.format(bdisk['uxname'], bdisk['ver'])
             minipath = build['isodir'] + '/' + usbfile
-    else:
-        isolinux_cfg = extradir + 'templates/BIOS/isolinux.cfg.multi.j2'
-        bitness = False
-    if os.path.isfile(isopath):
-        os.remove(isopath)
-    if archboot != tempdir + '/' + bdisk['name']: # best to use static concat here...
-        if os.path.isdir(builddir):
-            shutil.rmtree(builddir, ignore_errors = True)
-        shutil.copytree(archboot, builddir)
     # Copy isolinux files
     print("Now staging some files for ISO preparation. Please wait...")
     isolinux_files = ['isolinux.bin',
@@ -238,8 +290,16 @@ def genISO(conf):
         if os.path.isfile(sysl_tmp + f):
             os.remove(sysl_tmp + f)
         shutil.copy2(syslinuxdir + f, sysl_tmp + f)
+    tpl_loader = jinja2.FileSystemLoader(templates_dir)
+    env = jinja2.Environment(loader = tpl_loader)
+    tpl = env.get_template(isolinux_cfg)
+    tpl_out = tpl.render(build = build, bdisk = bdisk)
+    with open(sysl_tmp + '/isolinux.cfg', "w+") as f:
+        f.write(tpl_out)
     # And we need to build the ISO!
+    # TODO: only include UEFI support if we actually built it!
     print("Now generating the full ISO at {0}. Please wait.".format(isopath))
+    if efi:
         cmd = ['/usr/bin/xorriso',
             '-as', 'mkisofs',
             '-iso-level', '3',
@@ -260,12 +320,36 @@ def genISO(conf):
             '-isohybrid-gpt-basdat',
             '-output', isopath,
             tempdir]
-    proc = subprocess.Popen(cmd, stdout = subprocess.PIPE, bufsize = 1)
+    else:
+        # UNTESTED. TODO.
+        # I think i want to also get rid of: -boot-load-size 4,
+        # -boot-info-table, ... possiblyyy -isohybrid-gpt-basedat...
+        cmd = ['/usr/bin/xorriso',
+            '-as', 'mkisofs',
+            '-iso-level', '3',
+            '-full-iso9660-filenames',
+            '-volid', bdisk['name'],
+            '-appid', bdisk['desc'],
+            '-publisher', bdisk['dev'],
+            '-preparer', 'prepared by ' + bdisk['dev'],
+            '-eltorito-boot', 'isolinux/isolinux.bin',
+            '-eltorito-catalog', 'isolinux/boot.cat',
+            '-no-emul-boot',
+            '-boot-load-size', '4',
+            '-boot-info-table',
+            '-isohybrid-mbr', syslinuxdir + 'isohdpfx.bin',
+            '-no-emul-boot',
+            '-isohybrid-gpt-basdat',
+            '-output', isopath,
+            tempdir]
+    DEVNULL = open(os.devnull, 'w')
+    subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
+    #proc = subprocess.Popen(cmd, stdout = subprocess.PIPE, bufsize = 1)
     #for line in iter(proc.stdout.readline, b''):
-    for line in iter(proc.stdout.readline, ''):
-        print(line)
-    p.stdout.close()
-    p.wait()
+    #for line in iter(proc.stdout.readline, ''):
+    #    print(line)
+    #p.stdout.close()
+    #p.wait()
 
 
 def cleanUp():
     # TODO: clear out all of tempdir?
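
As an aside, on Python 3.3+ the open(os.devnull, 'w') handle used in several places above can be replaced by subprocess.DEVNULL; a sketch of the equivalent call, shown only as an alternative and not what the commit uses:

import subprocess

# stderr is folded into stdout, and both are discarded.
subprocess.call(['/usr/bin/xorriso', '-version'],
                stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)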

@@ -4,6 +4,7 @@ import platform
 import re
 import configparser
 import validators
+import git
 from socket import getaddrinfo
 
 def getOS():
@@ -54,12 +55,18 @@ def parseConfig(conf):
     config = configparser.ConfigParser()
     config._interpolation = configparser.ExtendedInterpolation()
     config.read(conf)
+    bdisk_repo_dir = config['build']['basedir']
     # a dict makes this so much easier.
     config_dict = {s:dict(config.items(s)) for s in config.sections()}
     # Convert the booleans to pythonic booleans in the dict...
     config_dict['bdisk']['user'] = config['bdisk'].getboolean('user')
     config_dict['build']['i_am_a_racecar'] = config['build'].getboolean('i_am_a_racecar')
     config_dict['build']['multiarch'] = (config_dict['build']['multiarch']).lower()
+    # Get the version...
+    if config_dict['bdisk']['ver'] == '':
+        repo = git.Repo(config_dict['build']['basedir'])
+        refs = repo.git.describe(repo.head.commit).split('-')
+        config_dict['bdisk']['ver'] = refs[0] + '-' + refs[2]
     for i in ('http', 'tftp', 'rsync', 'git'):
         config_dict['sync'][i] = config['sync'].getboolean(i)
     config_dict['ipxe']['iso'] = config['ipxe'].getboolean('iso')
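
The new version fallback in parseConfig() leans on GitPython's describe; a sketch of what it computes, assuming a checkout whose `git describe` yields the usual tag-commits-hash form (the repo path and tag are placeholders):

import git

repo = git.Repo('/opt/dev/bdisk')                       # hypothetical basedir checkout
refs = repo.git.describe(repo.head.commit).split('-')   # e.g. ['v3.00', '42', 'g1a2b3c4']
ver = refs[0] + '-' + refs[2]                           # -> 'v3.00-g1a2b3c4'
print(ver)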

@@ -3,9 +3,11 @@ import shutil
 import jinja2
 import git
 import patch
+import OpenSSL
 
 
-def sslIPXE():
+def sslIPXE(conf):
+    # http://www.pyopenssl.org/en/stable/api/crypto.html#pkey-objects
     pass
 
 def buildIPXE(conf):
@@ -33,8 +35,8 @@ def buildIPXE(conf):
     #os.chdir(ipxe_src + '/src')
     for p in ('01.git-version.patch.j2', '02.banner.patch.j2'):
         try:
-            patch = fromfile(p)
-            patch.apply(strip = 2, root = ipxe_src + '/src')
+            patchfile = patch.fromfile(patches_dir + '/' + p)
+            patchfile.apply(strip = 2, root = ipxe_src + '/src')
         except:
             pass
     #os.chdir(cwd)
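
The fix above stops shadowing the python-patch module and calls it as patch.fromfile() followed by .apply(); a minimal usage sketch with placeholder paths (fromfile() returns a false value if the patch cannot be parsed):

import patch

patchset = patch.fromfile('/opt/dev/bdisk/extra/patches/02.banner.patch')  # hypothetical patch file
if patchset:
    patchset.apply(strip=2, root='/var/tmp/ipxe/src')                      # hypothetical source tree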

@@ -6,7 +6,7 @@ import gnupg
 import tarfile
 import subprocess
 import re
-import git
+#import git
 import jinja2
 import datetime
 from urllib.request import urlopen
@@ -72,8 +72,8 @@ def downloadTarball(build):
                     tarball_path[a], sha1))
         tarball_hash = hashlib.sha1(open(tarball_path[a], 'rb').read()).hexdigest()
         if tarball_hash != sha1:
-            exit(("There was a failure fetching {0} and the wrong version exists on the filesystem.\n" +
-                    "Please try again later.").format(tarball))
+            exit(("{0} either did not download correctly or a wrong (probably old) version exists on the filesystem.\n" +
+                    "Please delete it and try again.").format(tarball))
         elif build['mirrorgpgsig'] != '':
             # okay, so the sha1 matches. let's verify the signature.
             if build['mirrorgpgsig'] == '.sig':
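
The tarball check above hashes the whole file in one read; the same check as a standalone sketch (the path is a placeholder, and for very large files a chunked read would be gentler on memory):

import hashlib

tarball = '/var/tmp/archlinux-bootstrap-x86_64.tar.gz'   # hypothetical download
with open(tarball, 'rb') as f:
    tarball_hash = hashlib.sha1(f.read()).hexdigest()
print(tarball_hash)   # compare against the mirror's published sha1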
@@ -156,15 +156,16 @@ def buildChroot(build):
 
 def prepChroot(build, bdisk):
     chrootdir = build['chrootdir']
+    tempdir = build['tempdir']
     arch = build['arch']
     bdisk_repo_dir = build['basedir']
     templates_dir = bdisk_repo_dir + '/extra/templates'
     build = {}
-    # let's prep some variables to write out the version info.txt
-    # get the git tag and short commit hash
-    repo = git.Repo(bdisk_repo_dir)
-    refs = repo.git.describe(repo.head.commit).split('-')
-    build['ver'] = refs[0] + '-' + refs[2]
+    ## let's prep some variables to write out the version info.txt
+    ## get the git tag and short commit hash
+    #repo = git.Repo(bdisk_repo_dir)
+    #refs = repo.git.describe(repo.head.commit).split('-')
+    #build['ver'] = refs[0] + '-' + refs[2]
     # and these should be passed in from the args, from the most part.
     build['name'] = bdisk['name']
     build['time'] = datetime.datetime.utcnow().strftime("%a %b %d %H:%M:%S UTC %Y")
@@ -176,9 +177,11 @@ def prepChroot(build, bdisk):
     loader = jinja2.FileSystemLoader(templates_dir)
     env = jinja2.Environment(loader = loader)
     tpl = env.get_template('VERSION_INFO.txt.j2')
-    tpl_out = tpl.render(build = build, hostname = host.getHostname())
+    tpl_out = tpl.render(build = build, bdisk = bdisk, hostname = host.getHostname())
     for a in arch:
-        with open(chrootdir + '/root.' + a + '/root/VERSION_INFO.txt', "w+") as f:
+        with open('{0}/root.{1}/root/VERSION_INFO.txt'.format(chrootdir, a), "w+") as f:
+            f.write(tpl_out)
+    with open(tempdir + '/VERSION_INFO.txt', "w+") as f:
         f.write(tpl_out)
     return(build)
 

@@ -44,6 +44,12 @@ uxname = bdisk
 ; 2.) ASCII *only*
 pname = BDisk
 
+; What version is this?
+; If we don't have a version specified here, we'll
+; try to guess based on the current git commit in build:basedir.
+; 0.) No whitespace
+ver =
+
 ; Your/your organization's name.
 ; The same rules as 'pname' apply:
 ; 0.) Can contain whitespace
@@ -206,7 +212,7 @@ basedir = /opt/dev/bdisk
 ; (The files will be very big!)
 ; 0.) No whitespace
 ; 1.) Will be created if it doesn't exist
-isodir = ${basedir}/iso
+isodir = ${dlpath}/iso
 
 ; This is a directory where we should save extra
 ; source code we download (if we need it).
@@ -217,7 +223,7 @@ srcdir = ${basedir}/extrasrc
 ; What directory should we use for staging?
 ; 0.) No whitespace
 ; 1.) Will be created if it doesn't exist
-tempdir = ${basedir}/temp
+tempdir = ${dlpath}/temp
 
 ; Where should we stage the boot files?
 ; This should not be the same dir as other options!
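
The new ${dlpath} references are configparser interpolation; a self-contained sketch of how such references resolve under ExtendedInterpolation (a stand-in snippet, not the full build.ini):

import configparser

ini = """
[build]
dlpath = /var/tmp/bdisk
isodir = ${dlpath}/iso
tempdir = ${dlpath}/temp
"""
config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
config.read_string(ini)
print(config['build']['isodir'])    # -> /var/tmp/bdisk/iso
print(config['build']['tempdir'])   # -> /var/tmp/bdisk/temp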

@@ -149,4 +149,5 @@ fi
 yes | pacman -Scc
 rm -f /root/.bash_history
 rm -f /root/.viminfo
+rm -f /root/apacman-*.pkg.tar.xz
+rm -f /root/pre-build.sh

@@ -1,4 +1,4 @@
 title {{ bdisk['pname'] }} (Media)
-linux /{% if efi is defined %}EFI/{{ bdisk['name'] }}{% else $}boot{% endif %}/{{ bdisk['uxname'] }}.{% if efi is defined %}efi{% else %}kern{% endif %}
-initrd /{% if efi is defined %}EFI/{{ bdisk['name'] }}{% else $}boot{% endif %}/{{ bdisk['uxname'] }}.img
+linux /{% if efi is defined %}EFI/{{ bdisk['name'] }}{% else %}boot{% endif %}/{{ bdisk['uxname'] }}.{% if efi is defined %}efi{% else %}kern{% endif %}
+initrd /{% if efi is defined %}EFI/{{ bdisk['name'] }}{% else %}boot{% endif %}/{{ bdisk['uxname'] }}.img
 options archisobasedir={{ bdisk['name'] }} archisolabel={{ bdisk['name'] }}

@@ -1,4 +1,4 @@
 title {{ bdisk['pname'] }} (RAM)
-linux /{% if efi is defined %}EFI/{{ bdisk['name'] }}{% else $}boot{% endif %}/{{ bdisk['uxname'] }}.{% if efi is defined %}efi{% else %}kern{% endif %}
-initrd /{% if efi is defined %}EFI/{{ bdisk['name'] }}{% else $}boot{% endif %}/{{ bdisk['uxname'] }}.img
+linux /{% if efi is defined %}EFI/{{ bdisk['name'] }}{% else %}boot{% endif %}/{{ bdisk['uxname'] }}.{% if efi is defined %}efi{% else %}kern{% endif %}
+initrd /{% if efi is defined %}EFI/{{ bdisk['name'] }}{% else %}boot{% endif %}/{{ bdisk['uxname'] }}.img
 options copytoram archisobasedir={{ bdisk['name'] }} archisolabel={{ bdisk['name'] }}

@@ -1,4 +1,4 @@
-Version: {{ build['ver'] }}
+Version: {{ bdisk['ver'] }}
 Build: {{ build['name'] }}
 Time: {{ build['time'] }}
 Machine: {{ hostname }}