minor fixes...
parent f12bb7799d
commit dbeee4789d
@@ -2,16 +2,9 @@ import os
import sys
import psutil
import subprocess
import ctypes
import datetime
def chrootMount(source, target, fs, options=''):
ret = ctypes.CDLL('libc.so.6', use_errno=True).mount(source, target, fs, 0, options)
if ret < 0:
errno = ctypes.get_errno()
raise RuntimeError("Error mounting {} ({}) on {} with options '{}': {}".
format(source, fs, target, options, os.strerror(errno)))
def chroot(chrootdir, chroot_hostname, cmd = '/root/pre-build.sh'):
# MOUNT the chroot
mountpoints = psutil.disk_partitions(all = True)
@@ -50,8 +43,8 @@ def chroot(chrootdir, chroot_hostname, cmd = '/root/pre-build.sh'):
if (chrootdir + '/tmp') not in mounts:
subprocess.call(['/bin/mount', '-t', 'tmpfs', '-o', 'mode=1777,strictatime,nodev,nosuid', 'tmp', chrootdir + '/tmp'])
print("Performing '{0}' in chroot for {1}...".format(cmd, chrootdir))
print("You can view the progress via:\n\n\ttail -f {0}/var/log/chroot_install.log\n".format(chrootdir))
print("{0}: Performing '{1}' in chroot for {2}...".format(datetime.datetime.now(), cmd, chrootdir))
print("\t\t\t You can view the progress via:\n\n\t\ttail -f {0}/var/log/chroot_install.log\n".format(chrootdir))
real_root = os.open("/", os.O_RDONLY)
os.chroot(chrootdir)
os.system('locale-gen > /dev/null 2>&1')
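
As context for the hunk above: saving real_root = os.open("/", os.O_RDONLY) before os.chroot() is the usual escape hatch for getting back out of the chroot afterwards. A minimal sketch of that enter/run/return pattern (the run_in_chroot name and the shell=True call are illustrative, not BDisk's actual implementation):

import os
import subprocess

def run_in_chroot(chrootdir, cmd='/root/pre-build.sh'):
    # Keep a file descriptor on the real root so we can escape the chroot later.
    real_root = os.open('/', os.O_RDONLY)
    try:
        os.chroot(chrootdir)
        os.chdir('/')
        subprocess.call(cmd, shell=True)
    finally:
        # fchdir() back to the saved descriptor, then chroot('.') to restore the real root.
        os.fchdir(real_root)
        os.chroot('.')
        os.close(real_root)
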
@@ -12,10 +12,10 @@ import datetime
# we also need to figure out how to implement "mentos" (old bdisk) like functionality, letting us reuse an existing chroot install if possible to save time for future builds.
# if not, though, it's no big deal.
if __name__ == '__main__':
print('Starting at {0}.'.format(datetime.datetime.now()))
print('{0}: Starting.'.format(datetime.datetime.now()))
conf = host.parseConfig(host.getConfig())[1]
prep.dirChk(conf)
prep.buildChroot(conf['build'])
prep.buildChroot(conf['build'], keep = False)
prep.prepChroot(conf['build'], conf['bdisk'], conf['user'])
arch = conf['build']['arch']
for a in arch:
@@ -26,4 +26,4 @@ if __name__ == '__main__':
build.genUEFI(conf['build'], conf['bdisk'])
fulliso = build.genISO(conf)
build.displayStats(fulliso)
print('Finished successfully at {0}.'.format(datetime.datetime.now()))
print('{0}: Finish.'.format(datetime.datetime.now()))
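
The change above swaps prep.buildChroot(conf['build']) for an explicit keep = False, anticipating the chroot-reuse ("mentos") idea mentioned in the comment. A sketch of how the flag could be driven from configuration instead of hardcoded (the keep_chroot key is hypothetical; it is not in BDisk's configuration today):

# Hypothetical wiring only; 'keep_chroot' is an illustrative config key.
keep = bool(conf['build'].get('keep_chroot', False))
prep.buildChroot(conf['build'], keep = keep)
prep.prepChroot(conf['build'], conf['bdisk'], conf['user'])
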
@@ -6,6 +6,7 @@ import subprocess
import hashlib
import jinja2
import humanize
import datetime
from urllib.request import urlopen
@@ -27,7 +28,9 @@ def genImg(build, bdisk):
airoot = archboot + '/' + a + '/'
squashimg = airoot + 'airootfs.sfs'
os.makedirs(airoot, exist_ok = True)
print("Generating squashed filesystem image for {0}. Please wait...".format(chrootdir + '/root.' + a))
print("{0}: Generating squashed filesystem image for {1}. Please wait...".format(
datetime.datetime.now(),
chrootdir + '/root.' + a))
# TODO: use stdout and -progress if debugging is enabled. the below subprocess.call() just redirects to
# /dev/null.
DEVNULL = open(os.devnull, 'w')
@@ -38,8 +41,15 @@ def genImg(build, bdisk):
'-noappend',
'-comp', 'xz']
subprocess.call(cmd, stdout = DEVNULL, stderr = subprocess.STDOUT)
print("{0}: Generated {1} ({2}).".format(
datetime.datetime.now(),
squashimg,
humanize.naturalsize(
os.path.getsize(squashimg))))
# Generate the checksum files
print("Generating SHA256 and MD5 hash checksum files for {0}. Please wait...".format(squashimg))
print("{0}: Generating SHA256 and MD5 hash checksum files for {1}. Please wait...".format(
datetime.datetime.now(),
squashimg))
hashes['sha256'][a] = hashlib.sha256()
hashes['md5'][a] = hashlib.md5()
with open(squashimg, 'rb') as f:
@@ -54,11 +64,14 @@ def genImg(build, bdisk):
f.write("{0} airootfs.sfs".format(hashes['sha256'][a].hexdigest()))
with open(airoot + 'airootfs.md5', 'w+') as f:
f.write("{0} airootfs.sfs".format(hashes['md5'][a].hexdigest()))
print("{0}: Hash checksums complete.".format(datetime.datetime.now()))
# Logo
os.makedirs(tempdir + '/boot', exist_ok = True)
if not os.path.isfile('{0}/extra/{1}.png'.format(basedir, bdisk['uxname'])):
shutil.copy2(basedir + '/extra/bdisk.png', '{0}/{1}.png'.format(tempdir, bdisk['uxname']))
else:
shutil.copy2(basedir + '/extra/{0}.png'.format(bdisk['uxname']), '{0}/{1}.png'.format(tempdir, bdisk['uxname']))
# Kernels, initrds...
# We use a dict here so we can use the right filenames...
# I might change how I handle this in the future.
bootfiles = {}
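
The checksum block above feeds the squashed image through hashlib.sha256() and hashlib.md5() before writing airootfs.sha256 and airootfs.md5. A standalone sketch of the same chunked-hashing idea, so a large image never has to sit in memory all at once (the hash_file name and chunk size are illustrative):

import hashlib

def hash_file(path, chunk_size=65536):
    # Update both digests incrementally from fixed-size chunks.
    sha256 = hashlib.sha256()
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            sha256.update(chunk)
            md5.update(chunk)
    return sha256.hexdigest(), md5.hexdigest()
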
@@ -88,7 +101,7 @@ def genUEFI(build, bdisk):
# For UEFI 2.3+ (http://sourceforge.net/apps/mediawiki/tianocore/index.php?title=UEFI_Shell)
if not os.path.isfile(tempdir + '/EFI/shellx64_v2.efi'):
shell2_path = tempdir + '/EFI/shellx64_v2.efi'
print("You are missing {0}. We'll download it for you.".format(shell2_path))
print("{0}: You are missing {1}. We'll download it for you.".format(datetime.datetime.now(), shell2_path))
shell2_url = 'https://raw.githubusercontent.com/tianocore/edk2/master/ShellBinPkg/UefiShell/X64/Shell.efi'
shell2_fetch = urlopen(shell2_url)
with open(shell2_path, 'wb+') as dl:
@@ -98,13 +111,13 @@ def genUEFI(build, bdisk):
# TODO: is there an Arch package for this? can we just install that in the chroot and copy the shell binaries?
if not os.path.isfile(tempdir + '/EFI/shellx64_v1.efi'):
shell1_path = tempdir + '/EFI/shellx64_v1.efi'
print("You are missing {0}. We'll download it for you.".format(shell1_path))
print("{0}: You are missing {1}. We'll download it for you.".format(datetime.datetime.now(), shell1_path))
shell1_url = 'https://raw.githubusercontent.com/tianocore/edk2/master/EdkShellBinPkg/FullShell/X64/Shell_Full.efi'
shell1_fetch = urlopen(shell1_url)
with open(shell1_path, 'wb+') as dl:
dl.write(shell1_fetch.read())
shell1_fetch.close()
print("Configuring UEFI bootloading...")
print("{0}: Configuring UEFI bootloading...".format(datetime.datetime.now()))
## But wait! That's not all! We need more binaries.
# http://blog.hansenpartnership.com/linux-foundation-secure-boot-system-released/
shim_url = 'http://blog.hansenpartnership.com/wp-uploads/2013/'
@@ -162,8 +175,11 @@ def genUEFI(build, bdisk):
for file in files:
fname = os.path.join(path, file)
sizetotal += os.path.getsize(fname)
# And now we create the file...
print("Creating a {0} bytes EFI ESP image at {1}. Please wait...".format(sizetotal, efiboot_img))
# And now we create the EFI binary filesystem image/binary...
print("{0}: Creating a {1} EFI ESP image at {2}. Please wait...".format(
datetime.datetime.now(),
humanize.naturalsize(sizetotal),
efiboot_img))
if os.path.isfile(efiboot_img):
os.remove(efiboot_img)
with open(efiboot_img, 'wb+') as f:
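
The sizetotal loop above adds up the staged EFI files to decide how large the ESP image needs to be, and the new print reports that figure through humanize.naturalsize(). A self-contained sketch of that calculation (tree_size and the example path are illustrative, not names from build.py):

import os
import humanize

def tree_size(path):
    # Total size, in bytes, of every file under 'path'.
    total = 0
    for root, dirs, files in os.walk(path):
        for name in files:
            total += os.path.getsize(os.path.join(root, name))
    return total

# Example: print(humanize.naturalsize(tree_size('/tmp/bdisk/efiboot')))
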
@@ -215,13 +231,15 @@ def genUEFI(build, bdisk):
'{0}/EFI/{1}/{2}.efi'.format(mountpt, bdisk['name'], bdisk['uxname']))
shutil.copy2('{0}/root.{1}/boot/initramfs-linux-{2}.img'.format(chrootdir, 'x86_64', bdisk['name']),
'{0}/EFI/{1}/{2}.img'.format(mountpt, bdisk['name'], bdisk['uxname']))
# TODO: support both Arch's as EFI bootable instead? Maybe? requires more research. very rare.
# TODO: support both arch's as EFI bootable instead? Maybe? requires more research. very rare.
#shutil.copy2('{0}/root.{1}/boot/vmlinuz-linux-{2}'.format(chrootdir, a, bdisk['name']),
# '{0}/EFI/{1}/{2}.{3}.efi'.format(mountpt, bdisk['name'], bdisk['uxname'], bitness))
#shutil.copy2('{0}/root.{1}/boot/initramfs-linux-{2}.img'.format(chrootdir, a, bdisk['uxname']),
# '{0}/EFI/{1}/{2}.{3}.img'.format(mountpt, bdisk['name'], bdisk['uxname'], bitness))
cmd = ['/bin/umount', mountpt]
subprocess.call(cmd)
efisize = humanize.naturalsize(os.path.getsize(efiboot_img))
print('{0}: Built EFI binary.'.format(datetime.datetime.now()))
return(efiboot_img)
def genISO(conf):
@@ -270,7 +288,7 @@ def genISO(conf):
usbfile = '{0}-{1}-mini.usb.img'.format(bdisk['uxname'], bdisk['ver'])
minipath = build['isodir'] + '/' + usbfile
# Copy isolinux files
print("Staging some files for ISO preparation. Please wait...")
print("{0}: Staging some files for ISO preparation. Please wait...".format(datetime.datetime.now()))
isolinux_files = ['isolinux.bin',
'vesamenu.c32',
'linux.c32',
@@ -299,7 +317,7 @@ def genISO(conf):
f.write(tpl_out)
# And we need to build the ISO!
# TODO: only include UEFI support if we actually built it!
print("Generating the full ISO at {0}. Please wait.".format(isopath))
print("{0}: Generating the full ISO at {1}. Please wait.".format(datetime.datetime.now(), isopath))
if efi:
cmd = ['/usr/bin/xorriso',
'-as', 'mkisofs',
@@ -5,6 +5,7 @@ import re
import configparser
import validators
import git
import datetime
from socket import getaddrinfo
def getOS():
@@ -79,7 +80,8 @@ def parseConfig(conf):
elif config_dict['build']['multiarch'] == 'i686':
config_dict['build']['arch'] = ['i686']
else:
exit(('ERROR: {0} is not a valid value. Check your configuration.').format(
exit(('{0}: ERROR: {1} is not a valid value. Check your configuration.').format(
datetime.datetime.now(),
config_dict['build']['multiarch']))
## VALIDATORS ##
# Validate bootstrap mirror
@@ -89,8 +91,9 @@ def parseConfig(conf):
try:
getaddrinfo(config_dict['build']['mirror'], None)
except:
exit(('ERROR: {0} does not resolve and cannot be used as a ' +
exit(('{0}: ERROR: {1} does not resolve and cannot be used as a ' +
'mirror for the bootstrap tarballs. Check your configuration.').format(
datetime.datetime.now(),
config_dict['build']['host']))
# Are we rsyncing? If so, validate the rsync host.
# Works for IP address too. It does NOT check to see if we can
@@ -102,22 +105,29 @@ def parseConfig(conf):
try:
getaddrinfo(config_dict['rsync']['host'], None)
except:
exit(('ERROR: {0} does not resolve and cannot be used for rsyncing.' +
'Check your configuration.').format(config_dict['rsync']['host']))
exit(('{0}: ERROR: {1} does not resolve and cannot be used for rsyncing.' +
'Check your configuration.').format(
datetime.datetime.now(),
config_dict['rsync']['host']))
else:
exit(('ERROR: {0} is not a valid host and cannot be used for rsyncing.' +
'Check your configuration.').format(config_dict['rsync']['host']))
exit(('{0}: ERROR: {1} is not a valid host and cannot be used for rsyncing.' +
'Check your configuration.').format(
datetime.datetime.now(),
config_dict['rsync']['host']))
# Validate the URI.
if config_dict['build']['ipxe']:
# so this won't validate e.g. custom LAN domains (https://pxeserver/bdisk.php). TODO.
if not validators.url(config_dict['ipxe']['uri']):
if not re.match('^https?://localhost(/.*)?$'):
exit('ERROR: {0} is not a valid URL/URI. Check your configuration.'.format(
exit('{0}: ERROR: {1} is not a valid URL/URI. Check your configuration.'.format(
datetime.datetime.now(),
config_dict['ipxe']['uri']))
# Validate required paths
if not os.path.exists(config_dict['build']['basedir'] + '/extra'):
exit(("ERROR: {0} does not contain BDisk's core files!" +
"Check your configuration.").format(config_dict['build']['basedir']))
exit(("{0}: ERROR: {1} does not contain BDisk's core files!" +
"Check your configuration.").format(
datetime.datetime.now(),
config_dict['build']['basedir']))
# Make dirs if they don't exist
for d in ('archboot', 'isodir', 'mountpt', 'srcdir', 'tempdir'):
os.makedirs(config_dict['build'][d], exist_ok = True)
@@ -131,10 +141,14 @@ def parseConfig(conf):
for x in ('ssl_key', 'ssl_cakey'):
if config_dict['ipxe'][x]:
if not os.path.isfile(config_dict['ipxe'][x]):
exit(('ERROR: {0} is not an existing file. Check your' +
'configuration.').format(config_dict['ipxe'][x]))
exit(('{0}: ERROR: {1} is not an existing file. Check your' +
'configuration.').format(
datetime.datetime.now(),
config_dict['ipxe'][x]))
if config_dict['ipxe']['ssl_ca']:
if not os.path.isfile(config_dict['ipxe']['ssl_ca']):
exit(('ERROR: {0} is not an existing file. Check your' +
'configuration.').format(config_dict['ipxe']['ssl_ca']))
exit(('{0}: ERROR: {1} is not an existing file. Check your' +
'configuration.').format(
datetime.datetime.now(),
config_dict['ipxe']['ssl_ca']))
return(config, config_dict)
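
Every validator in the hunks above now repeats datetime.datetime.now() inside its exit(...).format(...) call. A possible refactor, sketched here only as an illustration (errExit is hypothetical and not part of this commit), would centralize the timestamp prefix:

import datetime

def errExit(msg, *args):
    # Hypothetical helper: timestamp-prefix the message, then bail out.
    exit('{0}: ERROR: {1}'.format(datetime.datetime.now(), msg.format(*args)))

# Example:
# errExit('{0} is not a valid value. Check your configuration.',
#         config_dict['build']['multiarch'])
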
@@ -4,6 +4,7 @@ import jinja2
import git
import patch
import OpenSSL
import datetime
def sslIPXE(conf):
@@ -21,7 +22,9 @@ def buildIPXE(conf):
ipxe_src = srcdir + '/ipxe'
ipxe_git_uri = 'git://git.ipxe.org/ipxe.git'
patches_git_uri = 'https://github.com/eworm-de/ipxe.git'
print('Building iPXE in {0}. Please wait...'.format(ipxe_src))
print('{0}: Building iPXE in {1}. Please wait...'.format(
datetime.datetime.now(),
ipxe_src))
# Get the source and apply some cherrypicks
if os.path.isdir(ipxe_src):
shutil.rmtree(ipxe_src)
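
buildIPXE() above defines ipxe_git_uri and patches_git_uri, and the module imports git (GitPython), so the fetch-and-cherrypick step presumably goes through that library. A rough sketch of what such a step can look like with GitPython; the remote name, paths, and commit id are placeholders, and this is not the code this commit touches:

import git

ipxe_git_uri = 'git://git.ipxe.org/ipxe.git'
patches_git_uri = 'https://github.com/eworm-de/ipxe.git'
ipxe_src = '/tmp/bdisk/src/ipxe'  # placeholder path

repo = git.Repo.clone_from(ipxe_git_uri, ipxe_src)
patches = repo.create_remote('eworm', patches_git_uri)
patches.fetch()
repo.git.cherry_pick('<commit-id>')  # placeholder commit id
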
@@ -9,6 +9,7 @@ import re
#import git
import jinja2
import datetime
import humanize
from urllib.request import urlopen
import host # bdisk.host
@@ -44,11 +45,11 @@ def downloadTarball(build):
if build['mirrorgpgsig'] != '':
# we don't want to futz with the user's normal gpg.
gpg = gnupg.GPG(gnupghome = dlpath + '/.gnupg')
print("\nGenerating a GPG key. Please wait...")
print("\n{0}: Generating a GPG key. Please wait...".format(datetime.datetime.now()))
# python-gnupg 0.3.9 spits this error in Arch. it's harmless, but ugly af.
# TODO: remove this when the error doesn't happen anymore.
print("If you see a \"ValueError: Unknown status message: 'KEY_CONSIDERED'\" error, it can be safely ignored.")
print("If this is taking a VERY LONG time, try installing haveged and starting it. This can be " +
print("\tIf you see a \"ValueError: Unknown status message: 'KEY_CONSIDERED'\" error, it can be safely ignored.")
print("\tIf this is taking a VERY LONG time, try installing haveged and starting it. This can be " +
"done safely in parallel with the build process.\n")
input_data = gpg.gen_key_input(name_email = 'tempuser@nodomain.tld', passphrase = 'placeholder_passphrase')
key = gpg.gen_key(input_data)
@@ -62,18 +63,27 @@ def downloadTarball(build):
pass
else:
# fetch the tarball...
print("Fetching the tarball for {0} architecture, please wait...".format(a))
print("{0}: Fetching the tarball for {1} architecture, please wait...".format(
datetime.datetime.now(),
a))
#dl_file = urllib.URLopener()
tarball_dl = urlopen(rlsdir + tarball)
with open(tarball_path[a], 'wb') as f:
f.write(tarball_dl.read())
tarball_dl.close()
print(("Checking that the hash checksum for {0} matches {1}, please wait...").format(
tarball_path[a], sha1))
print("{0}: Done fetching {1} ({2}).".format(
datetime.datetime.now(),
tarball_path[a],
humanize.naturalsize(
os.path.getsize(tarball_path[a]))))
print("{0}: Checking that the hash checksum for {1} matches {2}, please wait...".format(
datetime.datetime.now(),
tarball_path[a],
sha1))
tarball_hash = hashlib.sha1(open(tarball_path[a], 'rb').read()).hexdigest()
if tarball_hash != sha1:
exit(("{0} either did not download correctly or a wrong (probably old) version exists on the filesystem.\n" +
"Please delete it and try again.").format(tarball))
exit(("{0}: {1} either did not download correctly or a wrong (probably old) version exists on the filesystem.\n" +
"Please delete it and try again.").format(datetime.datetime.now(), tarball))
elif build['mirrorgpgsig'] != '':
# okay, so the sha1 matches. let's verify the signature.
if build['mirrorgpgsig'] == '.sig':
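
The download path above read()s the whole tarball into memory to write it out, then opens it again and read()s it a second time to compute the SHA1. An illustrative alternative that streams the download to disk while hashing it in one pass (fetch_and_sha1 and the chunk size are not BDisk functions):

import hashlib
from urllib.request import urlopen

def fetch_and_sha1(url, dest, chunk_size=1 << 20):
    # Stream the remote file to 'dest' in chunks, hashing as we go.
    sha1 = hashlib.sha1()
    with urlopen(url) as response, open(dest, 'wb') as f:
        for chunk in iter(lambda: response.read(chunk_size), b''):
            f.write(chunk)
            sha1.update(chunk)
    return sha1.hexdigest()
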
@@ -91,36 +101,46 @@ def downloadTarball(build):
gpg_verify = gpg.verify_data(sig, tarball_data_in)
tarball_data.close()
if not gpg_verify:
exit("There was a failure checking {0} against {1}. Please investigate.".format(
sig, tarball_path[a]))
exit("{0}: There was a failure checking {1} against {2}. Please investigate.".format(
datetime.datetime.now(),
sig,
tarball_path[a]))
os.remove(sig)
return(tarball_path)
def unpackTarball(tarball_path, build):
def unpackTarball(tarball_path, build, keep = False):
chrootdir = build['chrootdir']
if os.path.isdir(chrootdir):
if not keep:
# Make the dir if it doesn't exist
shutil.rmtree(chrootdir, ignore_errors = True)
os.makedirs(chrootdir, exist_ok = True)
print("Extracting the tarball(s). Please wait...")
else:
os.makedirs(chrootdir, exist_ok = True)
# Open and extract the tarball
if not keep:
for a in build['arch']:
print("{0}: Extracting tarball {1} ({2}). Please wait...".format(
datetime.datetime.now(),
tarball_path[a],
humanize.naturalsize(
os.path.getsize(tarball_path[a]))))
tar = tarfile.open(tarball_path[a], 'r:gz')
tar.extractall(path = chrootdir)
tar.close()
print("Extraction for {0} finished.".format(tarball_path[a]))
print("{0}: Extraction for {1} finished.".format(datetime.datetime.now(), tarball_path[a]))
def buildChroot(build):
def buildChroot(build, keep = False):
dlpath = build['dlpath']
chrootdir = build['chrootdir']
arch = build['arch']
extradir = build['basedir'] + '/extra'
unpack_me = unpackTarball(downloadTarball(build), build)
unpack_me = unpackTarball(downloadTarball(build), build, keep)
# build dict of lists of files and dirs from pre-build.d dir, do the same with arch-specific changes.
prebuild_overlay = {}
prebuild_arch_overlay = {}
for x in arch:
os.remove('{0}/root.{1}/README'.format(chrootdir, x))
prebuild_arch_overlay[x] = {}
for y in ['files', 'dirs']:
prebuild_overlay[y] = []
@@ -182,7 +202,7 @@ def prepChroot(build, bdisk, user):
build['buildnum'] = 0
build['buildnum'] += 1
with open(dlpath + '/buildnum', 'w+') as f:
f.write(str(build['buildnum']))
f.write(str(build['buildnum']) + "\n")
# and now that we have that dict, let's write out the VERSION_INFO.txt file.
loader = jinja2.FileSystemLoader(templates_dir)
env = jinja2.Environment(loader = loader)
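
The prepChroot() fragment above bumps build['buildnum'] and now writes it back with a trailing newline. Pieced together, the counter's read-increment-write cycle looks roughly like the sketch below (nextBuildnum is a hypothetical name, not a function in prep.py):

import os

def nextBuildnum(dlpath):
    counter_file = dlpath + '/buildnum'
    buildnum = 0
    # Reuse the previous count if the file already exists.
    if os.path.isfile(counter_file):
        with open(counter_file, 'r') as f:
            buildnum = int(f.read().strip() or 0)
    buildnum += 1
    with open(counter_file, 'w') as f:
        f.write(str(buildnum) + "\n")
    return buildnum
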
@@ -208,6 +228,7 @@ def postChroot(build):
postbuild_overlay = {}
postbuild_arch_overlay = {}
for x in arch:
os.remove('{0}/root.{1}/README'.format(chrootdir, x))
postbuild_arch_overlay[x] = {}
for y in ['files', 'dirs']:
postbuild_overlay[y] = []
@@ -13,6 +13,7 @@
-- https://github.com/akopytov/sysbench
-- (http://blog.due.io/2014/linode-digitalocean-and-vultr-comparison/ etc.)
-implement pyalpm to decreate dependency on chroot pacman-ing?
--or even maybe https://wiki.archlinux.org/index.php/offline_installation_of_packages in pure python!
## NETWORKING ##
extra/pre-build.d/etc/locale.nopurge (new file, 56 lines)
@@ -0,0 +1,56 @@
####################################################
# This is the configuration file for localepurge(8).
####################################################
# Comment this to enable localepurge.
# NO COMMENT IT IF YOU ARE NOT SURE WHAT ARE YOU DOING
# THIS APP DO NOT ASK FOR CONFIRMATION
#NEEDSCONFIGFIRST
####################################################
# Uncommenting this string enables removal of localized
# man pages based on the configuration information for
# locale files defined below:
MANDELETE
####################################################
# Uncommenting this string causes localepurge to simply delete
# locales which have newly appeared on the system without
# bothering you about it:
#DONTBOTHERNEWLOCALE
#####################################################
# Commenting out this string enables faster but less
# accurate calculation of freed disk space:
#QUICKNDIRTYCALC
####################################################
# Uncommenting this string enables display of freed disk
# space if localepurge has purged any superfluous data:
SHOWFREEDSPACE
#####################################################
# Commenting out this string disables verbose output:
#VERBOSE
#####################################################
# You like Colors?
NOCOLOR
#####################################################
# You can use the -v -d -nc options in command line.
#####################################################
# Following locales won't be deleted from this system
# for example:
en
en_GB
en_GB.UTF-8
en_US
en_US.UTF-8
@@ -20,7 +20,7 @@ bin86
bind-tools
binutils
bluez-utils
#bonnie++ ## TODO: ugh. apacman has a new "regex mode"... that you can't disable. https://github.com/oshazard/apacman/issues/79
bonnie++ ## TODO: ugh. apacman has a new "regex mode"... that you can't disable. https://github.com/oshazard/apacman/issues/79
boxbackup-client
boxbackup-server
bozocrack-git
@@ -2,9 +2,6 @@
source /etc/bash.bashrc
# we need this fix before anything.
dirmngr </dev/null > /dev/null 2>&1
# Import settings.
source /root/VARS.txt
@@ -13,6 +10,10 @@ exec 3>&1 4>&2
trap 'exec 2>&4 1>&3' 0 1 2 3
exec 1>/var/log/chroot_install.log 2>&1
# we need this fix before anything.
dirmngr </dev/null > /dev/null 2>&1
locale-gen
cleanPacorigs()
{
for x in $(find /etc/ -type f -iname "*.pacorig");
@@ -20,7 +21,18 @@ cleanPacorigs()
mv -f ${x} ${x%%.pacorig}
done
}
getPkgList()
{
if [ -f "${1}" ];
then
pkgfile=$(cat ${1})
echo "${pkgfile}" | \
sed -r -e '/^[[:space:]]*(#|$)/d' \
-e 's/[[:space:]]*#.*$//g' | \
tr '\n' ' ' | \
sed -re 's/([+\_])/\\\1/g'
fi
}
# NetworkManager is a scourge upon the earth that must be purged and cleansed.
ln -s /dev/null /etc/systemd/system/NetworkManager.service
ln -s /dev/null /etc/systemd/system/NetworkManager-dispatcher.service
@@ -64,7 +76,8 @@ else
TGT_ARCH='i686'
fi
# Install some stuff we need for the ISO.
PKGLIST=$(sed -re '/^[[:space:]]*(#|$)/d' /root/iso.pkgs.both | tr '\n' ' ')
#PKGLIST=$(sed -re '/^[[:space:]]*(#|$)/d' /root/iso.pkgs.both | tr '\n' ' ')
PKGLIST=$(getPkgList /root/iso.pkgs.both)
if [[ -n "${PKGLIST}" ]];
then
apacman --noconfirm --noedit --skipinteg -S --needed ${PKGLIST}
@@ -72,7 +85,8 @@ then
cleanPacorigs
fi
# And install arch-specific packages for the ISO, if there are any.
PKGLIST=$(sed -re '/^[[:space:]]*(#|$)/d' /root/iso.pkgs.arch | tr '\n' ' ')
#PKGLIST=$(sed -re '/^[[:space:]]*(#|$)/d' /root/iso.pkgs.arch | tr '\n' ' ')
PKGLIST=$(getPkgList /root/iso.pkgs.arch)
if [[ -n "${PKGLIST}" ]];
then
apacman --noconfirm --noedit --skipinteg -S --needed ${PKGLIST}
@@ -89,10 +103,16 @@ mv -f /boot/vmlinuz-linux /boot/vmlinuz-linux-${DISTNAME}
cleanPacorigs
# And install EXTRA functionality packages, if there are any.
PKGLIST=$(sed -re '/^[[:space:]]*(#|$)/d' /root/packages.both | tr '\n' ' ')
#PKGLIST=$(sed -re '/^[[:space:]]*(#|$)/d' /root/packages.both | tr '\n' ' ')
PKGLIST=$(getPkgList /root/packages.both)
if [[ -n "${PKGLIST}" ]];
then
apacman --noconfirm --noedit --skipinteg -S --needed ${PKGLIST}
echo "Now installing your extra packages. This will take a while and might appear to hang."
#yes 1 | apacman --noconfirm --noedit --skipinteg -S --needed ${PKGLIST}
for p in ${PKGLIST};
do
apacman --noconfirm --noedit --skipinteg -S --needed ${p}
done
apacman --gendb
cleanPacorigs
fi
@@ -127,17 +147,34 @@ fi
cleanPacorigs
mv -f /boot/initramfs-linux.img /boot/initramfs-linux-${DISTNAME}.img
# And install arch-specific extra packages, if there are any.
PKGLIST=$(sed -re '/^[[:space:]]*(#|$)/d' /root/packages.arch | tr '\n' ' ')
#PKGLIST=$(sed -re '/^[[:space:]]*(#|$)/d' /root/packages.arch | tr '\n' ' ')
PKGLIST=$(getPkgList /root/packages.arch)
if [[ -n "${PKGLIST}" ]];
then
#apacman --noconfirm --noedit --skipinteg -S --needed ${PKGLIST}
for p in ${PKGLIST};
do
apacman --noconfirm --noedit --skipinteg -S --needed ${PKGLIST}
done
apacman --gendb
cleanPacorigs
fi
# Run any arch-specific tasks here.
if [ -f '/root/pre-build.arch.sh' ];
then
cnt=$(sed -re '/^[[:space:]]*(#|$)/d' /root/pre-build.arch.sh | wc -l)
if [[ "${cnt}" -ge 1 ]];
then
/root/pre-build.arch.sh
fi
rm -f /root/pre-build.arch.sh
fi
# Cleanup
#yes | pacman -Scc # doesn't parse yes(1) output correctly, it seems.
# TODO: look into https://wiki.archlinux.org/index.php/Pacman/Tips_and_tricks#Removing_unused_packages_.28orphans.29
paccache -rk0
localepurge-config
localepurge
rm -f /root/.bash_history
rm -f /root/.viminfo
rm -f /root/apacman-*.pkg.tar.xz
@@ -71,3 +71,4 @@ LABEL {{ bdisk['uxname'] }}_{{ bitness }}
ENDTEXT
MENU END
@@ -108,3 +108,4 @@ LABEL {{ bdisk['uxname'] }}_32
ENDTEXT
MENU END
@@ -2,3 +2,4 @@ title {{ bdisk['pname'] }} (Media)
linux /{% if efi is defined %}EFI/{{ bdisk['name'] }}{% else %}boot{% endif %}/{{ bdisk['uxname'] }}.{% if efi is defined %}efi{% else %}kern{% endif %}
initrd /{% if efi is defined %}EFI/{{ bdisk['name'] }}{% else %}boot{% endif %}/{{ bdisk['uxname'] }}.img
options archisobasedir={{ bdisk['name'] }} archisolabel={{ bdisk['name'] }}
@@ -1,2 +1,3 @@
timeout 3
default {{ bdisk['uxname'] }}
@@ -2,3 +2,4 @@ title {{ bdisk['pname'] }} (RAM)
linux /{% if efi is defined %}EFI/{{ bdisk['name'] }}{% else %}boot{% endif %}/{{ bdisk['uxname'] }}.{% if efi is defined %}efi{% else %}kern{% endif %}
initrd /{% if efi is defined %}EFI/{{ bdisk['name'] }}{% else %}boot{% endif %}/{{ bdisk['uxname'] }}.img
options copytoram archisobasedir={{ bdisk['name'] }} archisolabel={{ bdisk['name'] }}
@@ -1,2 +1,3 @@
title UEFI Shell (v1)
efi /EFI/shellx64_v1.efi
@@ -1,2 +1,3 @@
title UEFI Shell (v2)
efi /EFI/shellx64_v2.efi
@@ -6,3 +6,4 @@ export DISTDESC='{{ bdisk['desc'] }}'
export REGUSR='{{ bdisk['name']|lower }}'
export REGUSR_PASS='{{ user['password'] }}'
export ROOT_PASS='{{ bdisk['root_password'] }}'
@@ -6,3 +6,4 @@ dhcp
#imgverify vmlinuz path/to/vmlinuz.sig
#imgverify initrd path/to/initrd.sig
chain {{ ipxe['uri'] }}
@@ -31,3 +31,4 @@ keyUsage = critical,cRLSign,keyCertSign
[ codesigning ]
keyUsage = digitalSignature
extendedKeyUsage = codeSigning