checking in- currently trying to get the cleanup sorted. i need something that works better, i think

This commit is contained in:
brent s. 2016-11-26 22:05:53 -05:00
parent 36c7da470a
commit b95bef3b17
10 changed files with 391 additions and 183 deletions

View File

@@ -11,13 +11,15 @@ import psutil
#from pychroot.base import Chroot
import pychroot
import subprocess
import ctypes

#class mountpoints(argparse.Action):
#
# def __call__(self, parser, namespace, values, option_string=None):
# if not getattr(namespace, 'mountpoints', False):
# namespace.mountpoints = {}
# namespace.mountpoints.update(values)

def chrootMount(source, target, fs, options=''):
    # libc's mount(2) wants C strings, so encode the str arguments before handing them over.
    ret = ctypes.CDLL('libc.so.6', use_errno=True).mount(source.encode(), target.encode(), fs.encode(), 0, options.encode())
    if ret < 0:
        errno = ctypes.get_errno()
        raise RuntimeError("Error mounting {} ({}) on {} with options '{}': {}".format(
            source, fs, target, options, os.strerror(errno)))
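For illustration, a hypothetical call mounting proc through this wrapper rather than the subprocess call used below. Note the mountflags argument is hardcoded to 0, so flag-style options (nosuid, noexec, etc.) would need actual MS_* bits rather than the data string:

chrootMount('proc', chrootdir + '/proc', 'proc')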

def chroot(chrootdir, chroot_hostname, cmd = '/root/pre-build.sh'):
# MOUNT the chroot
@@ -25,44 +27,48 @@ def chroot(chrootdir, chroot_hostname, cmd = '/root/pre-build.sh'):
mounts = []
for m in mountpoints:
mounts.append(m.mountpoint)
cmnts = {}
# mount the chrootdir... onto itself. as a bind mount. it's so stupid, i know. see https://bugs.archlinux.org/task/46169
if chrootdir not in mounts:
#cmnts[chrootdir + ':' + chrootdir] = {'recursive': False, 'readonly': False, 'create': False}
cmnts[chrootdir + ':/'] = {'recursive': False, 'readonly': False, 'create': False}

subprocess.call(['mount', '--bind', chrootdir, chrootdir])
### The following mountpoints don't seem to mount properly with pychroot. save it for v3.n+1. TODO. ###
# bind-mount so we can resolve things inside
if (chrootdir + '/etc/resolv.conf') not in mounts:
subprocess.call(['mount', '--bind', '-o', 'ro', '/etc/resolv.conf', chrootdir + '/etc/resolv.conf'])
# mount -t proc to chrootdir + '/proc' here
if (chrootdir + '/proc') not in mounts:
cmnts['proc:/proc'] = {'recursive': True, 'create': True}

subprocess.call(['mount', '-t', 'proc', '-o', 'nosuid,noexec,nodev', 'proc', chrootdir + '/proc'])
# rbind mount /sys to chrootdir + '/sys' here
if (chrootdir + '/sys') not in mounts:
#cmnts['/sys:/sys'] = {'recursive': True, 'create': True} # if the below doesn't work, try me. can also try ['sysfs:/sys']
cmnts['/sys'] = {'recursive': True, 'create': True}

# rbind mount /dev to chrootdir + '/dev' here
if (chrootdir + '/dev') not in mounts:
cmnts['/dev'] = {'recursive': True, 'create': True}

subprocess.call(['mount', '-t', 'sysfs', '-o', 'nosuid,noexec,nodev,ro', 'sys', chrootdir + '/sys'])
# mount the efivars in the chroot if it exists on the host. i mean, why not?
if '/sys/firmware/efi/efivars' in mounts:
if (chrootdir + '/sys/firmware/efi/efivars') not in mounts:
cmnts['/sys/firmware/efi/efivars'] = {'recursive': True}

subprocess.call(['mount', '-t', 'efivarfs', '-o', 'nosuid,noexec,nodev', 'efivarfs', chrootdir + '/sys/firmware/efi/efivars'])
# rbind mount /dev to chrootdir + '/dev' here
if (chrootdir + '/dev') not in mounts:
subprocess.call(['mount', '-t', 'devtmpfs', '-o', 'mode=0755,nosuid', 'udev', chrootdir + '/dev'])
if (chrootdir + '/dev/pts') not in mounts:
subprocess.call(['mount', '-t', 'devpts', '-o', 'mode=0620,gid=5,nosuid,noexec', 'devpts', chrootdir + '/dev/pts'])
if '/dev/shm' in mounts:
if (chrootdir + '/dev/shm') not in mounts:
subprocess.call(['mount', '-t', 'tmpfs', '-o', 'mode=1777,nosuid,nodev', 'shm', chrootdir + '/dev/shm'])
if '/run' in mounts:
if (chrootdir + '/run') not in mounts:
cmnts['/run'] = {'recursive': True}
subprocess.call(['mount', '-t', 'tmpfs', '-o', 'nosuid,nodev,mode=0755', 'run', chrootdir + '/run'])
if '/tmp' in mounts:
if (chrootdir + '/tmp') not in mounts:
subprocess.call(['mount', '-t', 'tmpfs', '-o', 'mode=1777,strictatime,nodev,nosuid', 'tmp', chrootdir + '/tmp'])

pychroot.base.Chroot.default_mounts = {}
chroot = pychroot.base.Chroot(chrootdir, mountpoints = cmnts, hostname = chroot_hostname)
chroot.mount()
with chroot:
import os
os.system(cmd)
chroot.cleanup()
return(chrootdir, cmnts)
print("Now performing '{0}' in chroot for {1}...".format(cmd, chrootdir))
print("You can view the progress via:\n\n\ttail -f {0}/var/log/chroot_install.log\n".format(chrootdir))
real_root = os.open("/", os.O_RDONLY)  # keep a handle on the host's root so we can escape the chroot later
os.chroot(chrootdir)
os.system(cmd)  # run the requested command (by default /root/pre-build.sh) inside the chroot
os.fchdir(real_root)  # step back out to the host's root...
os.chroot('.')  # ...and make it the root again
os.close(real_root)
return(chrootdir)

#def chrootUnmount(chrootdir, cmnts):
def chrootUnmount(chrootdir):
# TODO: https://github.com/pkgcore/pychroot/issues/22 try to do this more pythonically. then we can remove subprocess
subprocess.call(['umount', '-lR', chrootdir])
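Until that pychroot issue is resolved, here's a minimal subprocess-free sketch of the lazy-unmount half, assuming libc's umount2(2) and its MNT_DETACH flag (value 2, the equivalent of `umount -l`). Unlike `umount -lR` it doesn't recurse, so it would need to be called once per mountpoint, deepest first; it reuses the ctypes/os imports already at the top of this file:

def chrootUnmountPythonic(mountpoint):
    libc = ctypes.CDLL('libc.so.6', use_errno = True)
    MNT_DETACH = 2  # lazy detach, same semantics as `umount -l`
    if libc.umount2(mountpoint.encode(), MNT_DETACH) < 0:
        errno = ctypes.get_errno()
        raise RuntimeError("Error unmounting {}: {}".format(mountpoint, os.strerror(errno)))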

View File

@@ -1,16 +1,10 @@
#!/bin/env python3
import host
import prep
import bchroot
import build

# we need to:
# 1.) import the config- this gives us info about things like build paths, etc. host.parseConfig(host.getConfig()) should do this
# 2.) prep.dirChk
# 3.) prep.downloadTarball
# 4.) prep.unpackTarball
# 5.) prep.buildChroot
# 6.) prep.prepChroot
# 7.) bchroot.chrootCmd (TODO)- this should run the <chroot>/root/pre-build.sh script
# 7.5) ....figure out a way to get those dirs to *un*mount... and only mount in 7. if they're not currently mounted.
# 8.) build.chrootClean (TODO) see jenny_craig in old bdisk. i can *probably* do this within the chroot for the most part as part of pre-build.sh
# 9.) build.genImg (TODO)- build the squashed image, etc. see will_it_blend in old bdisk
# 10.) build.genUEFI (TODO)- build the uefi binary/bootloading. see stuffy in old bdisk
@@ -22,18 +16,10 @@ if __name__ == '__main__':
# TODO: config for chrootdir, dlpath
conf = host.parseConfig(host.getConfig())[1]
prep.dirChk(conf)
if conf['build']['multiarch']:
for arch in ('x86_64', 'i686'):
#prep.unpackTarball(prep.downloadTarball(arch, '/var/tmp/bdisk'), '/var/tmp/chroot/' + arch)
prep.buildChroot(arch, '/var/tmp/chroot/' + arch, '/var/tmp/bdisk', conf['build']['basedir'] + '/extra')
prep.prepChroot(conf['build']['basedir'] + '/extra/templates', '/var/tmp/chroot/' + arch, conf['bdisk'], arch)
bchroot.chroot('/var/tmp/chroot/' + arch, 'bdisk.square-r00t.net')
bchroot.chrootUnmount('/var/tmp/chroot/' + arch)
else:
# TODO: implement specific-arch building or separate building instances
for arch in ('x86_64', 'i686'):
#prep.unpackTarball(prep.downloadTarball(arch, '/var/tmp/bdisk'), '/var/tmp/chroot/' + arch)
prep.buildChroot(arch, '/var/tmp/chroot/' + arch, '/var/tmp/bdisk', conf['build']['basedir'] + '/extra')
prep.prepChroot(conf['build']['basedir'] + '/extra/templates', '/var/tmp/chroot/' + arch, conf['bdisk'], arch)
bchroot.chroot('/var/tmp/chroot/' + arch, 'bdisk.square-r00t.net')
bchroot.chrootUnmount('/var/tmp/chroot/' + arch)
prep.buildChroot(conf['build'])
prep.prepChroot(conf['build']['basedir'] + '/extra/templates', conf['build'], conf['bdisk'])
arch = conf['build']['arch']
for a in arch:
bchroot.chroot(conf['build']['chrootdir'] + '/root.' + a, 'bdisk.square-r00t.net')
bchroot.chrootUnmount(conf['build']['chrootdir'] + '/root.' + a)
build.chrootClean(conf['build'])

bdisk/build.py (new executable file, 112 lines)
View File

@@ -0,0 +1,112 @@
import os
import tarfile
import shutil
import glob


def chrootClean(build):
chrootdir = build['chrootdir']
arch = build['arch']
backupdir = build['dlpath'] + '/' + 'bak'
os.makedirs(backupdir, exist_ok = True)
## Save some stuff that needs to be retained.
# Compress the pacman cache.
for a in arch:
os.makedirs(chrootdir + '/root.' + a + '/usr/local/pacman', exist_ok = True)
tarball = chrootdir + '/root.' + a + '/usr/local/pacman/pacman.db.tar.xz'
dbdir = chrootdir + '/root.' + a + '/var/lib/pacman/local'
print("Now cleaning {0}/root.{1}. Please wait...".format(chrootdir, a))
if os.path.isfile(tarball):
os.remove(tarball)
with tarfile.open(name = tarball, mode = 'w:xz') as tar: # if this balks, try x:xz
tar.add(dbdir, arcname = os.path.basename(dbdir))
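The restore half isn't part of this commit; presumably the live system would unpack the database back into place at boot. A minimal sketch, assuming the same paths as above (run from inside the booted image):

with tarfile.open(name = '/usr/local/pacman/pacman.db.tar.xz', mode = 'r:xz') as tar:
    tar.extractall(path = '/var/lib/pacman')  # recreates /var/lib/pacman/local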
# Cut out the fat
# The following are intended as "overrides" of the paths we'll be deleting.
backup = {}
backup['dirs'] = ['/var/lib/pacman/local']
backup['files'] = ['/usr/share/locale/locale.alias',
'/usr/share/zoneinfo/EST5EDT',
'/usr/share/zoneinfo/UTC',
'/usr/share/locale/en',
'/usr/share/locale/en_US',
'/usr/share/locale/en_GB']
# And these are what we remove.
delete = {}
delete['dirs'] = ['/usr/share/locale/*',
'/var/cache/pacman/*',
'/var/cache/pkgfile/*',
'/var/cache/apacman/pkg/*',
'/var/lib/pacman/*',
'/var/abs/local/yaourtbuild/*',
'/usr/share/zoneinfo',
'/root/.gnupg',
'/tmp/*',
'/var/tmp/*',
'/var/abs/*',
'/run/*',
'/boot/*',
'/usr/src/*',
'/var/log/*',
'/.git']
delete['files'] = ['/root/.bash_history',
'/root/apacman*',
'/root/iso.pkgs*',
'/root/packages.*',
'/root/pre-build.sh',
'/root/.viminfo',
'/root/.bashrc']
# First we back up the files we want to keep. We don't need to create
# backup['dirs'] since they should be empty; if not, they go in backup['files'].
for f in backup['files']:
    # Paths need to be rooted in the chroot, not the host's filesystem.
    src = chrootdir + '/root.' + a + f
    dst = backupdir + '/root.' + a + f
    if os.path.isfile(src):
        os.makedirs(os.path.dirname(dst), exist_ok = True)
        shutil.copy2(src, dst)
    elif os.path.isdir(src):
        # Some entries (e.g. the locale dirs) are directories, not plain files.
        if os.path.exists(dst):
            shutil.rmtree(dst)
        shutil.copytree(src, dst)
# Now we delete the above.
for f in delete['files']:
for x in glob.glob(chrootdir + '/root.' + a + f):
os.remove(x)
for d in delete['dirs']:
for x in glob.glob(chrootdir + '/root.' + a + d):
#os.remove(x)
shutil.rmtree(x)
# And restore the dirs/files from the backup into the (now cleaned) chroot.
for d in backup['dirs']:
    os.makedirs(chrootdir + '/root.' + a + d, exist_ok = True)
for f in backup['files']:
    src = backupdir + '/root.' + a + f
    dst = chrootdir + '/root.' + a + f
    if os.path.isfile(src):
        os.makedirs(os.path.dirname(dst), exist_ok = True)
        shutil.copy2(src, dst)
    elif os.path.isdir(src):
        if os.path.exists(dst):
            shutil.rmtree(dst)
        shutil.copytree(src, dst)
#shutil.rmtree(backupdir)

def genImg():
pass

def genUEFI():
pass

def genISO():
pass

View File

@@ -17,6 +17,10 @@ def getBits():
bits = list(platform.architecture())[0]
return(bits)

def getHostname():
hostname = platform.node()
return(hostname)

def getConfig(conf_file='/etc/bdisk/build.ini'):
conf = False
# define some default conf paths in case we're installed by
@@ -55,12 +59,32 @@ def parseConfig(conf):
# Convert the booleans to pythonic booleans in the dict...
config_dict['bdisk']['user'] = config['bdisk'].getboolean('user')
config_dict['build']['i_am_a_racecar'] = config['build'].getboolean('i_am_a_racecar')
config_dict['build']['multiarch'] = config['build'].getboolean('multiarch')
config_dict['build']['multiarch'] = (config_dict['build']['multiarch']).lower()
for i in ('http', 'tftp', 'rsync', 'git'):
config_dict['sync'][i] = config['sync'].getboolean(i)
config_dict['ipxe']['iso'] = config['ipxe'].getboolean('iso')
config_dict['ipxe']['usb'] = config['ipxe'].getboolean('usb')
# and build a list of arch(es) we want to build
if config_dict['build']['multiarch'] in ('','yes','true','1'):
config_dict['build']['arch'] = ['x86_64','i686']
elif config_dict['build']['multiarch'] == 'x86_64':
config_dict['build']['arch'] = ['x86_64']
elif config_dict['build']['multiarch'] == 'i686':
config_dict['build']['arch'] = ['i686']
else:
exit(('ERROR: {0} is not a valid value. Check your configuration.').format(
config_dict['build']['multiarch']))
## VALIDATORS ##
# Validate bootstrap mirror
if (validators.domain(config_dict['build']['mirror']) or validators.ipv4(
config_dict['build']['mirror']) or validators.ipv6(
config_dict['build']['mirror'])):
try:
getaddrinfo(config_dict['build']['mirror'], None)
except Exception:
exit(('ERROR: {0} does not resolve and cannot be used as a ' +
'mirror for the bootstrap tarballs. Check your configuration.').format(
config_dict['build']['mirror']))
# Are we rsyncing? If so, validate the rsync host.
# Works for IP address too. It does NOT check to see if we can
# actually *rsync* to it; that'll come later.
@@ -89,7 +113,7 @@ def parseConfig(conf):
"Check your configuration.").format(config_dict['build']['basedir']))
# Make dirs if they don't exist
for d in ('archboot', 'isodir', 'mountpt', 'srcdir', 'tempdir'):
os.makedirs(config_dict['build'][d], exists_ok = True)
os.makedirs(config_dict['build'][d], exist_ok = True)
# Make dirs for sync staging if we need to
for x in ('http', 'tftp'):
if config_dict['sync'][x]:

View File

@@ -10,107 +10,115 @@ import git
import jinja2
import datetime
from urllib.request import urlopen

def archChk(arch):
if arch in ['i686', 'x86_64']:
return(arch)
else:
exit("{0} is not a valid architecture. Must be one of i686 or x86_64.".format(arch))
import host # bdisk.host

def dirChk(config_dict):
# Make dirs if they don't exist
for d in ('archboot', 'isodir', 'mountpt', 'srcdir', 'tempdir'):
os.makedirs(config_dict['build'][d], exists_ok = True)
os.makedirs(config_dict['build'][d], exist_ok = True)
# Make dirs for sync staging if we need to
for x in ('http', 'tftp'):
if config_dict['sync'][x]:
os.makedirs(config_dict[x]['path'], exist_ok = True)

def downloadTarball(arch, dlpath):
# arch - should be i686 or x86_64
def downloadTarball(build):
dlpath = build['dlpath']
arch = build['arch']
#mirror = 'http://mirrors.kernel.org/archlinux'
mirror = build['mirrorproto'] + '://' + build['mirror']
rlsdir = mirror + build['mirrorpath']
sha_in = urlopen(mirror + build['mirrorchksum'])
# returns path/filename e.g. /some/path/to/file.tar.gz
# we use .gnupg since we'll need it later.
archChk(arch)
try:
os.makedirs(dlpath + '/.gnupg')
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
tarball_path = dlpath + '/.latest.' + arch + '.tar.gz'
#mirror = 'http://mirrors.kernel.org/archlinux'
mirror = 'https://mirror.us.leaseweb.net/archlinux'
rlsdir = mirror + '/iso/latest'
sha_in = urlopen(rlsdir + '/sha1sums.txt')
os.makedirs(dlpath + '/.gnupg', exist_ok = True)
tarball_path = {}
for x in arch:
tarball_path[x] = dlpath + '/.latest.' + x + '.tar'
sha1sums = sha_in.read()
sha_in.close()
sha1_list = sha1sums.decode("utf-8")
sha_list = list(filter(None, sha1_list.split('\n')))
sha_raw = sha1sums.decode("utf-8")
sha_list = list(filter(None, sha_raw.split('\n')))
sha_dict = {x.split()[1]: x.split()[0] for x in sha_list}
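As a concrete (hypothetical) illustration of what that comprehension builds: a sha1sums.txt line such as

0a1b2c3d4e5f60718293a4b5c6d7e8f901234567  archlinux-bootstrap-2016.11.01-x86_64.tar.gz

becomes the entry sha_dict['archlinux-bootstrap-2016.11.01-x86_64.tar.gz'] == '0a1b2c3d4e5f60718293a4b5c6d7e8f901234567'.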
pattern = re.compile('^archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-' + arch + '\.tar\.gz$')
tarball = [filename.group(0) for l in list(sha_dict.keys()) for filename in [pattern.search(l)] if filename][0]
sha1 = sha_dict[tarball]
# all that lousy work just to get a sha1 sum. okay. so.
if os.path.isfile(tarball_path):
pass
else:
# fetch the tarball...
print("Fetching the tarball for {0} architecture, please wait...".format(arch))
tarball_dl = urlopen(rlsdir + tarball)
with open(dlpath + '/latest.' + arch + '.tar.gz', 'wb') as f:
f.write(tarball_dl)
tarball_dl.close()
tarball_hash = hashlib.sha1(open(tarball_path, 'rb').read()).hexdigest()
if tarball_hash != sha1:
exit("There was a failure fetching the tarball and the wrong version exists on the filesystem.\nPlease try again later.")
else:
# okay, so the sha1 matches. let's verify the signature.
if build['mirrorgpgsig'] != '':
# we don't want to futz with the user's normal gpg.
gpg = gnupg.GPG(gnupghome = dlpath + '/.gnupg')
print("\nNow generating a GPG key. Please wait...")
# python-gnupg 0.3.9 spits this error in Arch. it's harmless, but ugly af.
# TODO: remove this when the error doesn't happen anymore.
print("If you see a \"ValueError: Unknown status message: 'KEY_CONSIDERED'\" error, it can be safely ignored.")
print("If this is taking a VERY LONG time, try installing haveged and starting it. This can be " +
"done safely in parallel with the build process.\n")
input_data = gpg.gen_key_input(name_email = 'tempuser@nodomain.tld', passphrase = 'placeholder_passphrase')
key = gpg.gen_key(input_data)
keyid = '7F2D434B9741E8AC'
gpg.recv_keys('pgp.mit.edu', keyid)
gpg_sig = tarball + '.sig'
sig_dl = urlopen(rlsdir + gpg_sig)
with open(tarball_path + '.sig', 'wb+') as f:
f.write(sig_dl)
sig_dl.close()
sig = tarball_path + '.sig'
tarball_data = open(tarball_path, 'rb')
tarball_data_in = tarball_data.read()
gpg_verify = gpg.verify_data(sig, tarball_data_in)
tarball_data.close()
if not gpg_verify:
exit("There was a failure checking the signature of the release tarball. Please investigate.")
os.remove(sig)
keyid = build['gpgkey']
gpg.recv_keys(build['gpgkeyserver'], keyid)
for a in arch:
pattern = re.compile('^.*' + a + '\.tar(\.(gz|bz2|xz))?$')
tarball = [filename.group(0) for l in list(sha_dict.keys()) for filename in [pattern.search(l)] if filename][0]
sha1 = sha_dict[tarball]
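For reference, a more readable equivalent of that comprehension (group(0) of a fully-anchored match is just the key itself, so taking the first matching key gives the same result):

tarball = [l for l in sha_dict if pattern.search(l)][0]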
if os.path.isfile(tarball_path[a]):
pass
else:
# fetch the tarball...
print("Fetching the tarball for {0} architecture, please wait...".format(a))
#dl_file = urllib.URLopener()
tarball_dl = urlopen(rlsdir + tarball)
with open(tarball_path[a], 'wb') as f:
f.write(tarball_dl.read())
tarball_dl.close()
print(("Checking that the hash checksum for {0} matches {1}, please wait...").format(
tarball_path[a], sha1))
tarball_hash = hashlib.sha1(open(tarball_path[a], 'rb').read()).hexdigest()
if tarball_hash != sha1:
exit(("There was a failure fetching {0} and the wrong version exists on the filesystem.\n" +
"Please try again later.").format(tarball))
elif build['mirrorgpgsig'] != '':
# okay, so the sha1 matches. let's verify the signature.
if build['mirrorgpgsig'] == '.sig':
gpgsig_remote = rlsdir + tarball + '.sig'
else:
gpgsig_remote = mirror + build['mirrorgpgsig']
gpg_sig = tarball + '.sig'
sig_dl = urlopen(gpgsig_remote)
sig = tarball_path[a] + '.sig'
with open(sig, 'wb+') as f:
f.write(sig_dl.read())
sig_dl.close()
tarball_data = open(tarball_path[a], 'rb')
tarball_data_in = tarball_data.read()
gpg_verify = gpg.verify_data(sig, tarball_data_in)
tarball_data.close()
if not gpg_verify:
exit("There was a failure checking {0} against {1}. Please investigate.".format(
sig, tarball_path[a]))
os.remove(sig)

return(tarball_path)

def unpackTarball(tarball_path, chrootdir):
def unpackTarball(tarball_path, build):
chrootdir = build['chrootdir']
# Make the dir if it doesn't exist
try:
os.makedirs(chrootdir)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
shutil.rmtree(chrootdir, ignore_errors = True)
os.makedirs(chrootdir, exist_ok = True)
print("Now extracting the tarball(s). Please wait...")
# Open and extract the tarball
tar = tarfile.open(tarball_path, 'r:gz')
tar.extractall(path = destdir)
tar.close()
return(True)
for a in build['arch']:
tar = tarfile.open(tarball_path[a], 'r:gz')
tar.extractall(path = chrootdir)
tar.close()
print("Extraction for {0} finished.".format(tarball_path[a]))

def buildChroot(arch, chrootdir, dlpath, extradir):
unpack_me = unpackTarball(downloadTarball(archChk(arch), dlpath), chrootdir)
if unpack_me:
pass
else:
exit("Something went wrong when trying to unpack the tarball.")

print("The download and extraction has completed. Now prepping the chroot environment with some additional changes.")
def buildChroot(build):
dlpath = build['dlpath']
chrootdir = build['chrootdir']
arch = build['arch']
extradir = build['basedir'] + '/extra'
unpack_me = unpackTarball(downloadTarball(build), build)
# build dict of lists of files and dirs from pre-build.d dir, do the same with arch-specific changes.
prebuild_overlay = {}
prebuild_arch_overlay = {}
for x in ['i686', 'x86_64']:
for x in arch:
prebuild_arch_overlay[x] = {}
for y in ['files', 'dirs']:
prebuild_overlay[y] = []
@@ -129,40 +137,45 @@ def buildChroot(arch, chrootdir, dlpath, extradir):
prebuild_overlay[x][:] = [y for y in prebuild_overlay[x] if not y.startswith(('x86_64','i686'))]
prebuild_overlay['dirs'].remove('/')
# create the dir structure. these should almost definitely be owned by root.
for dir in prebuild_overlay['dirs']:
os.makedirs(chrootdir + '/' + dir, exist_ok = True)
os.chown(chrootdir + '/' + dir, 0, 0)
# and copy over the files. again, chown to root.
for file in prebuild_overlay['files']:
shutil.copy2(extradir + '/pre-build.d/' + file, chrootdir + '/' + file)
os.chown(chrootdir + '/' + file, 0, 0)
# do the same for arch-specific stuff.
for dir in prebuild_arch_overlay[arch]['dirs']:
os.makedirs(chrootdir + '/' + dir, exist_ok = True)
os.chown(chrootdir + '/' + dir, 0, 0)
for file in prebuild_arch_overlay[arch]['files']:
shutil.copy2(extradir + '/pre-build.d/' + arch + '/' + file, chrootdir + '/' + file)
os.chown(chrootdir + '/' + file, 0, 0)
return(chrootdir)
for a in arch:
for dir in prebuild_overlay['dirs']:
os.makedirs(chrootdir + '/root.' + a + '/' + dir, exist_ok = True)
os.chown(chrootdir + '/root.' + a + '/' + dir, 0, 0)
# and copy over the files. again, chown to root.
for file in prebuild_overlay['files']:
shutil.copy2(extradir + '/pre-build.d/' + file, chrootdir + '/root.' + a + '/' + file)
os.chown(chrootdir + '/root.' + a + '/' + file, 0, 0)
# do the same for arch-specific stuff.
for dir in prebuild_arch_overlay[a]['dirs']:
os.makedirs(chrootdir + '/root.' + a + '/' + dir, exist_ok = True)
os.chown(chrootdir + '/root.' + a + '/' + dir, 0, 0)
for file in prebuild_arch_overlay[a]['files']:
shutil.copy2(extradir + '/pre-build.d/' + a + '/' + file, chrootdir + '/root.' + a + '/' + file)
os.chown(chrootdir + '/root.' + a + '/' + file, 0, 0)

def prepChroot(templates_dir, chrootdir, bdisk, arch):
def prepChroot(templates_dir, build, bdisk):
chrootdir = build['chrootdir']
arch = build['arch']
bdisk_repo_dir = build['basedir']
build = {}
# let's prep some variables to write out the version info.txt
# get the git tag and short commit hash
repo = git.Repo(bdisk['dir'])
repo = git.Repo(bdisk_repo_dir)
refs = repo.git.describe(repo.head.commit).split('-')
build['ver'] = refs[0] + '-' + refs[2]
# and these should be passed in from the args, for the most part.
build['name'] = bdisk['name']
build['time'] = datetime.datetime.utcnow().strftime("%a %b %d %H:%M:%S UTC %Y")
build['host'] = bdisk['hostname']
hostname = host.getHostname()
build['user'] = os.environ['USER']
if os.environ['SUDO_USER']:
if 'SUDO_USER' in os.environ:
build['realuser'] = os.environ['SUDO_USER']
# and now that we have that dict, let's write out the VERSION_INFO.txt file.
env = jinja2.Environment(loader=FileSystemLoader(templates_dir))
loader = jinja2.FileSystemLoader(templates_dir)
env = jinja2.Environment(loader = loader)
tpl = env.get_template('VERSION_INFO.txt.j2')
tpl_out = template.render(build = build)
with open(chrootdir + '/root/VERSION_INFO.txt', "wb+") as f:
fh.write(tpl_out)
tpl_out = tpl.render(build = build, hostname = hostname)
for a in arch:
with open(chrootdir + '/root.' + a + '/root/VERSION_INFO.txt', "w+") as f:
f.write(tpl_out)
return(build)

View File

@@ -123,6 +123,72 @@ password =
#---------------------------------------------------------#
[build]

; What is the mirror for your bootstrap tarball?
; It is *highly* recommended you use an Arch Linux tarball
; as the build process is highly specialized to this.
; 0.) No whitespace
; 1.) Must be accessible remotely (no local file paths)
mirror = mirror.us.leaseweb.net

; What is the protocol for the bootstrap mirror?
; 0.) Must be one of:
; http, https, ftp
mirrorproto = https

; What is the path to the tarball directory?
; 0.) Must be a complete path
; (e.g. /dir1/subdir1/subdir2/)
; 1.) No whitespace
mirrorpath = /archlinux/iso/latest/

; What is the filename for the tarball found in the above?
; If left blank, we will use the sha1 checksum file to try
; to guess the most recent file.
mirrorfile =

; What is the path to a sha1 checksum file?
; 0.) No whitespace
; 1.) Must be the full path
; 2.) Don't include the mirror domain or protocol
mirrorchksum = ${mirrorpath}sha1sums.txt

; Optional GPG checking.
; If the file has a GPG signature file,
; we can use it for extra checking.
; If it's blank, GPG checking will be disabled.
; If you specify just '.sig' (or use the default
; and don't actually specify a mirrorfile),
; we'll try to guess based on the file from the sha1
; checksums.
; 0.) No whitespace (if specified)
; 1.) Must be the full path
; 2.) Don't include the mirror domain or protocol
mirrorgpgsig = ${mirrorfile}.sig
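(Note how this default dovetails with prep.py above: with mirrorfile left blank, ${mirrorfile}.sig interpolates to just '.sig', which is exactly the sentinel downloadTarball() checks for before falling back to deriving the signature URL from the sha1-derived tarball name.)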

; What is a valid key ID that should be used to
; verify the tarballs?
; 0.) Only used if mirrorgpgsig is set
; 1.) Should be in the "shortform"
; (e.g. 7F2D434B9741E8AC)
gpgkey = 7F2D434B9741E8AC

; What is a valid keyserver we should use
; to fetch gpgkey?
; 0.) Only used if mirrorgpgsig is set
; 1.) The default is probably fine.
gpgkeyserver = pgp.mit.edu

; Where should we save the bootstrap tarballs?
; 0.) No whitespace
; 1.) Will be created if it doesn't exist
dlpath = /var/tmp/${bdisk:uxname}

; Where should the bootstrap tarballs extract to and the
; chroots be built?
; 0.) No whitespace
; 1.) Will be created if it doesn't exist
chrootdir = /var/tmp/chroots

; Where is the base of the BDisk project located?
; In other words, if you cloned BDisk from git,
; what is BDisk's working tree directory?
@@ -164,9 +230,10 @@ mountpt = /mnt/${bdisk:uxname}
; Should we build a multiarch image? That is to say, the
; same ISO file can be used for both i686 and x86_64.
; 0.) Only accepts (case-insensitive):
; yes/true (build both i686 and x86_64 in the same image)
; no/false (build separate images for both arches)
; i686 (ONLY build i686 architecture)
; x86_64 (ONLY build x86_64 architecture)
; If it is undefined, it is assumed to be no.
multiarch = yes

@@ -181,10 +248,9 @@ multiarch = yes
ipxe = yes

; This option should only be enabled if you are on a fairly
; powerful, multicore system with plenty of RAM. It will
; speed the build process along, but will have some
; seriously adverse effects if your system can't handle it.
; Most modern systems should be fine with leaving it enabled.
; 0.) Only accepts (case-insensitive):
; yes|no
@@ -212,9 +278,9 @@ i_am_a_racecar = yes
http = yes

; Should we generate/prepare TFTP files?
; This is mostly only useful if you plan on using more
; traditional (non-iPXE) setups and regular PXE bootstrapping
; into iPXE.
; 0.) Only accepts (case-insensitive):
; yes|no
; true|false
@@ -223,19 +289,18 @@ http = yes
tftp = yes

; Enable automatic Git pushing for any changes done to the
; project itself? If you don't have upstream write access,
; you'll want to set this to False.
; 0.) Only accepts (case-insensitive):
; yes|no
; true|false
; 1|0
; If it is undefined, it is assumed to be no.
git = no

; Enable rsync pushing for the ISO (and other files, if
; you choose- useful for iPXE over HTTP(S)).
rsync = no


#---------------------------------------------------------#
@@ -275,8 +340,8 @@ group = http
path = ${build:basedir}/tftpboot

; What user and group, if applicable, should the TFTP files
; be owned as? This is most likely going to be either 'tftp'
; or 'root'.
; 0.) No whitespace
; 1.) User must exist on system
; 2.) If sync:tftpdir is blank, they will not be used
@@ -291,9 +356,9 @@ group = root
[ipxe]

; Build a "mini-ISO"; that is, an ISO file that can be used
; to bootstrap an iPXE environment (so you don't need to set
; up a traditional PXE environment on your LAN). We'll still
; build a full standalone ISO no matter what.
; 0.) Only accepts (case-insensitive):
; yes|no
; true|false
@@ -301,13 +366,12 @@ group = root
; If it is undefined, it is assumed to be no.
iso = yes

; Build a "mini-USB" image? Same concept as the ISO file but
; this can be dd'd onto a USB thumbdrive for the same effect.
usb = yes

; What URI should iPXE's EMBED script use? DO NOT USE A
; ',' (comma); instead, replace it with:
; %%COMMA%%
; If you require HTTP BASIC Authentication or HTTP Digest
; Authentication (untested), you can format it via:
@@ -315,7 +379,7 @@ usb = yes
; https://user:password@domain.tld/page.php
;
; This currently does not work for HTTPS with self-signed
; certificates.
; 0.) REQUIRED if iso and/or usb is set to True/yes/etc.
; 1.) Must be a valid URI understood by minimal versions
; of curl.
@@ -324,15 +388,15 @@ uri = https://bdisk.square-r00t.net
; Path to the (root) CA certificate file iPXE should use.
; Note that you can use your own CA to sign existing certs.
; See http://ipxe.org/crypto for more info. This is handy if
; you run a third-party/"Trusted" root-CA-signed certificate
; for the HTTPS target.
; 0.) No whitespace
; 1.) Must be in PEM/X509 format
; 2.) REQUIRED if iso and/or usb is set to True/yes/etc.
; 3.) If specified, a matching key (ssl_cakey) MUST be
; specified
; 4.) HOWEVER, if left blank, one will be automatically
; generated
ssl_ca =

; Path to the (root) CA key file iPXE should use.

View File

@@ -1,5 +1,8 @@
#!/bin/bash

# we need this fix before anything- the first dirmngr run primes gnupg's homedir/daemon so later key fetches don't hang.
dirmngr </dev/null > /dev/null 2>&1

# Import settings.
if [[ -f /root/VARS.txt ]];
then

View File

@@ -1,5 +1,5 @@
Version: {{ build['ver'] }}
Build: {{ build['name'] }}
Time: {{ build['time'] }}
Machine: {{ build['host'] }}
Machine: {{ hostname }}
User: {{ build['user'] }}{% if build['realuser'] is defined and build['realuser'] %} ({{ build['realuser'] }}){% endif %}
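For illustration only, with hypothetical values plugged in, the rendered VERSION_INFO.txt would read something like:

Version: 3.00-g1234abc
Build: BDisk
Time: Sat Nov 26 22:05:53 UTC 2016
Machine: buildhost.example.com
User: root (jdoe)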