Checking in - currently trying to get the cleanup sorted. I need something that works better, I think.
This commit is contained in:
parent 36c7da470a
commit b95bef3b17
| @@ -11,13 +11,15 @@ import psutil | |||||||
| #from pychroot.base import Chroot | #from pychroot.base import Chroot | ||||||
| import pychroot | import pychroot | ||||||
| import subprocess | import subprocess | ||||||
|  | import ctypes | ||||||
| 
 | 
 | ||||||
| #class mountpoints(argparse.Action): |  | ||||||
| # | def chrootMount(source, target, fs, options=''): | ||||||
| #    def __call__(self, parser, namespace, values, option_string=None): |     # libc's mount(2) wants C strings, so encode any str arguments to bytes | ||||||
| #        if not getattr(namespace, 'mountpoints', False): |     ret = ctypes.CDLL('libc.so.6', use_errno=True).mount(source.encode(), target.encode(), fs.encode(), 0, options.encode()) | ||||||
| #            namespace.mountpoints = {} |     if ret < 0: | ||||||
| #            namespace.mountpoints.update(values) |         errno = ctypes.get_errno() | ||||||
|  |         raise RuntimeError("Error mounting {} ({}) on {} with options '{}': {}". | ||||||
|  |                         format(source, fs, target, options, os.strerror(errno))) | ||||||
| 
 | 
 | ||||||
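(An aside on the new chrootMount() helper: flag-style options like nosuid or nodev are MS_* bits in mount(2)'s flags argument, which this signature hardcodes to 0, so only filesystem-specific data strings work through it for now. A hypothetical call site, assuming the str-to-bytes encoding above:)

    # roughly equivalent to: mount -t tmpfs -o mode=0755 run <chrootdir>/run
    chrootMount('tmpfs', chrootdir + '/run', 'tmpfs', options='mode=0755')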
| def chroot(chrootdir, chroot_hostname, cmd = '/root/pre-build.sh'): | def chroot(chrootdir, chroot_hostname, cmd = '/root/pre-build.sh'): | ||||||
|     # MOUNT the chroot |     # MOUNT the chroot | ||||||
| @@ -25,44 +27,48 @@ def chroot(chrootdir, chroot_hostname, cmd = '/root/pre-build.sh'): | |||||||
|     mounts = [] |     mounts = [] | ||||||
|     for m in mountpoints: |     for m in mountpoints: | ||||||
|         mounts.append(m.mountpoint) |         mounts.append(m.mountpoint) | ||||||
|     cmnts = {} |  | ||||||
|     # mount the chrootdir... onto itself. as a bind mount. it's so stupid, i know. see https://bugs.archlinux.org/task/46169 |     # mount the chrootdir... onto itself. as a bind mount. it's so stupid, i know. see https://bugs.archlinux.org/task/46169 | ||||||
|     if chrootdir not in mounts: |     if chrootdir not in mounts: | ||||||
|         #cmnts[chrootdir + ':' + chrootdir] = {'recursive': False, 'readonly': False, 'create': False} |         subprocess.call(['mount', '--bind', chrootdir, chrootdir]) | ||||||
|         cmnts[chrootdir + ':/'] = {'recursive': False, 'readonly': False, 'create': False} | ### The following mountpoints don't seem to mount properly with pychroot. save it for v3.n+1. TODO. ### | ||||||
|  |     # bind-mount resolv.conf (read-only) so DNS resolution works inside the chroot | ||||||
|  |     if (chrootdir + '/etc/resolv.conf') not in mounts: | ||||||
|  |         subprocess.call(['mount', '--bind', '-o', 'ro', '/etc/resolv.conf', chrootdir + '/etc/resolv.conf']) | ||||||
|     # mount -t proc to chrootdir + '/proc' here |     # mount -t proc to chrootdir + '/proc' here | ||||||
|     if (chrootdir + '/proc') not in mounts: |     if (chrootdir + '/proc') not in mounts: | ||||||
|         cmnts['proc:/proc'] = {'recursive': True, 'create': True} |         subprocess.call(['mount', '-t', 'proc', '-o', 'nosuid,noexec,nodev', 'proc', chrootdir + '/proc']) | ||||||
| 
 |  | ||||||
|     # rbind mount /sys to chrootdir + '/sys' here |     # rbind mount /sys to chrootdir + '/sys' here | ||||||
|     if (chrootdir + '/sys') not in mounts: |     if (chrootdir + '/sys') not in mounts: | ||||||
|         #cmnts['/sys:/sys'] = {'recursive': True, 'create': True}  # if the below doesn't work, try me. can also try ['sysfs:/sys'] |         subprocess.call(['mount', '-t', 'sysfs', '-o', 'nosuid,noexec,nodev,ro', 'sys', chrootdir + '/sys']) | ||||||
|         cmnts['/sys'] = {'recursive': True, 'create': True} |  | ||||||
| 
 |  | ||||||
|     # rbind mount /dev to chrootdir + '/dev' here |  | ||||||
|     if (chrootdir + '/dev') not in mounts: |  | ||||||
|         cmnts['/dev'] = {'recursive': True, 'create': True} |  | ||||||
| 
 |  | ||||||
|     # mount the efivars in the chroot if it exists on the host. i mean, why not? |     # mount the efivars in the chroot if it exists on the host. i mean, why not? | ||||||
|     if '/sys/firmware/efi/efivars' in mounts: |     if '/sys/firmware/efi/efivars' in mounts: | ||||||
|         if (chrootdir + '/sys/firmware/efi/efivars') not in mounts: |         if (chrootdir + '/sys/firmware/efi/efivars') not in mounts: | ||||||
|             cmnts['/sys/firmware/efi/efivars'] = {'recursive': True} |             subprocess.call(['mount', '-t', 'efivarfs', '-o', 'nosuid,noexec,nodev', 'efivarfs', chrootdir + '/sys/firmware/efi/efivars']) | ||||||
| 
 |     # rbind mount /dev to chrootdir + '/dev' here | ||||||
|  |     if (chrootdir + '/dev') not in mounts: | ||||||
|  |         subprocess.call(['mount', '-t', 'devtmpfs', '-o', 'mode=0755,nosuid', 'udev', chrootdir + '/dev']) | ||||||
|  |     if (chrootdir + '/dev/pts') not in mounts: | ||||||
|  |         subprocess.call(['mount', '-t', 'devpts', '-o', 'mode=0620,gid=5,nosuid,noexec', 'devpts', chrootdir + '/dev/pts']) | ||||||
|  |     if '/dev/shm' in mounts: | ||||||
|  |         if (chrootdir + '/dev/shm') not in mounts: | ||||||
|  |             subprocess.call(['mount', '-t', 'tmpfs', '-o', 'mode=1777,nosuid,nodev', 'shm', chrootdir + '/dev/shm']) | ||||||
|     if '/run' in mounts: |     if '/run' in mounts: | ||||||
|         if (chrootdir + '/run') not in mounts: |         if (chrootdir + '/run') not in mounts: | ||||||
|             cmnts['/run'] = {'recursive': True} |             subprocess.call(['mount', '-t', 'tmpfs', '-o', 'nosuid,nodev,mode=0755', 'run', chrootdir + '/run']) | ||||||
|  |     if '/tmp' in mounts: | ||||||
|  |         if (chrootdir + '/tmp') not in mounts: | ||||||
|  |             subprocess.call(['mount', '-t', 'tmpfs', '-o', 'mode=1777,strictatime,nodev,nosuid', 'tmp', chrootdir + '/tmp']) | ||||||
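(Since these subprocess.call() mounts all follow the same pattern, they could collapse into one data-driven loop. A sketch with the same commands, just reorganized; the host-side checks for efivars, /dev/shm, /run, and /tmp and the bind mounts are elided here for brevity:)

    import subprocess

    # (fstype, options, source, target suffix) for each chroot mount; same as the calls above
    CHROOT_MOUNTS = [
        ('proc', 'nosuid,noexec,nodev', 'proc', '/proc'),
        ('sysfs', 'nosuid,noexec,nodev,ro', 'sys', '/sys'),
        ('efivarfs', 'nosuid,noexec,nodev', 'efivarfs', '/sys/firmware/efi/efivars'),
        ('devtmpfs', 'mode=0755,nosuid', 'udev', '/dev'),
        ('devpts', 'mode=0620,gid=5,nosuid,noexec', 'devpts', '/dev/pts'),
        ('tmpfs', 'mode=1777,nosuid,nodev', 'shm', '/dev/shm'),
        ('tmpfs', 'nosuid,nodev,mode=0755', 'run', '/run'),
        ('tmpfs', 'mode=1777,strictatime,nodev,nosuid', 'tmp', '/tmp'),
    ]

    def mountChrootDirs(chrootdir, mounts):
        # 'mounts' is the list of active mountpoints built from psutil above
        for fs, opts, src, suffix in CHROOT_MOUNTS:
            if (chrootdir + suffix) not in mounts:
                subprocess.call(['mount', '-t', fs, '-o', opts, src, chrootdir + suffix])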
| 
 | 
 | ||||||
|     pychroot.base.Chroot.default_mounts = {} |     print("Now performing '{0}' in chroot for {1}...".format(cmd, chrootdir)) | ||||||
|     chroot = pychroot.base.Chroot(chrootdir, mountpoints = cmnts, hostname = chroot_hostname) |     print("You can view the progress via:\n\n\ttail -f {0}/var/log/chroot_install.log\n".format(chrootdir)) | ||||||
|     chroot.mount() |     real_root = os.open("/", os.O_RDONLY) | ||||||
|     with chroot: |     os.chroot(chrootdir) | ||||||
|         import os |     os.chdir('/')  # don't leave the cwd pointing outside the new root | ||||||
|  |     os.system(cmd)  # run the passed-in command rather than a hardcoded path | ||||||
|         os.system(cmd) |     os.fchdir(real_root) | ||||||
|     chroot.cleanup() |     os.chroot('.') | ||||||
|     return(chrootdir, cmnts) |     os.close(real_root) | ||||||
|  |     return(chrootdir) | ||||||
| 
 | 
 | ||||||
| #def chrootUnmount(chrootdir, cmnts): |  | ||||||
| def chrootUnmount(chrootdir): | def chrootUnmount(chrootdir): | ||||||
|     # TODO: https://github.com/pkgcore/pychroot/issues/22 try to do this more pythonically. then we can remove subprocess |     # TODO: https://github.com/pkgcore/pychroot/issues/22 try to do this more pythonically. then we can remove subprocess | ||||||
|     subprocess.call(['umount', '-lR', chrootdir]) |     subprocess.call(['umount', '-lR', chrootdir]) | ||||||
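(On the TODO above: the same ctypes route used for chrootMount() covers unmounting too, via umount2(2) with MNT_DETACH for umount -l's lazy behavior. A sketch, not what this commit ships; the deepest-first sort is an assumption about replicating -R:)

    import ctypes
    import os
    import psutil  # already used above to enumerate mountpoints

    MNT_DETACH = 2  # from <sys/mount.h>: lazy unmount, like umount -l

    def chrootUnmountPythonic(chrootdir):
        libc = ctypes.CDLL('libc.so.6', use_errno=True)
        # unmount the deepest paths first, mimicking umount -R
        targets = sorted((p.mountpoint for p in psutil.disk_partitions(all=True)
                          if p.mountpoint.startswith(chrootdir)),
                         key=len, reverse=True)
        for t in targets:
            if libc.umount2(t.encode(), MNT_DETACH) < 0:
                errno = ctypes.get_errno()
                raise RuntimeError("Error unmounting {}: {}".format(t, os.strerror(errno)))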
|  | |||||||
| @@ -1,16 +1,10 @@ | |||||||
|  | #!/usr/bin/env python3 | ||||||
| import host | import host | ||||||
| import prep | import prep | ||||||
| import bchroot | import bchroot | ||||||
|  | import build | ||||||
| 
 | 
 | ||||||
| # we need to: | # we need to: | ||||||
| # 1.) import the config- this gives us info about things like build paths, etc. host.parseConfig(host.getConfig()) should do this |  | ||||||
| # 2.) prep.dirChk |  | ||||||
| # 3.) prep.downloadTarball |  | ||||||
| # 4.) prep.unpackTarball |  | ||||||
| # 5.) prep.buildChroot |  | ||||||
| # 6.) prep.prepChroot |  | ||||||
| # 7.) bchroot.chrootCmd (TODO)- this should run the <chroot>/root/pre-build.sh script |  | ||||||
| # 7.5) ....figure out a way to get those dirs to *un*mount... and only mount in 7. if they're not currently mounted. |  | ||||||
| # 8.) build.chrootClean (TODO) see jenny_craig in old bdisk. i can *probably* do this within the chroot for the most part as part of pre-build.sh | # 8.) build.chrootClean (TODO) see jenny_craig in old bdisk. i can *probably* do this within the chroot for the most part as part of pre-build.sh | ||||||
| # 9.) build.genImg (TODO)- build the squashed image, etc. see will_it_blend in old bdisk | # 9.) build.genImg (TODO)- build the squashed image, etc. see will_it_blend in old bdisk | ||||||
| # 10.) build.genUEFI (TODO)- build the uefi binary/bootloading. see stuffy in old bdisk | # 10.) build.genUEFI (TODO)- build the uefi binary/bootloading. see stuffy in old bdisk | ||||||
| @@ -22,18 +16,10 @@ if __name__ == '__main__': | |||||||
|     # TODO: config for chrootdir, dlpath |     # TODO: config for chrootdir, dlpath | ||||||
|     conf = host.parseConfig(host.getConfig())[1] |     conf = host.parseConfig(host.getConfig())[1] | ||||||
|     prep.dirChk(conf) |     prep.dirChk(conf) | ||||||
|     if conf['build']['multiarch']: |     prep.buildChroot(conf['build']) | ||||||
|         for arch in ('x86_64', 'i686'): |     prep.prepChroot(conf['build']['basedir'] + '/extra/templates', conf['build'], conf['bdisk']) | ||||||
|             #prep.unpackTarball(prep.downloadTarball(arch, '/var/tmp/bdisk'), '/var/tmp/chroot/' + arch) |     arch = conf['build']['arch'] | ||||||
|             prep.buildChroot(arch, '/var/tmp/chroot/' + arch, '/var/tmp/bdisk', conf['build']['basedir'] + '/extra') |     for a in arch: | ||||||
|             prep.prepChroot(conf['build']['basedir'] + '/extra/templates', '/var/tmp/chroot/' + arch, conf['bdisk'], arch) |         bchroot.chroot(conf['build']['chrootdir'] + '/root.' + a, 'bdisk.square-r00t.net') | ||||||
|             bchroot.chroot('/var/tmp/chroot/' + arch, 'bdisk.square-r00t.net') |         bchroot.chrootUnmount(conf['build']['chrootdir'] + '/root.' + a) | ||||||
|             bchroot.chrootUnmount('/var/tmp/chroot/' + arch) |     build.chrootClean(conf['build']) | ||||||
|     else: |  | ||||||
|         # TODO: implement specific-arch building or separate building instances |  | ||||||
|         for arch in ('x86_64', 'i686'): |  | ||||||
|             #prep.unpackTarball(prep.downloadTarball(arch, '/var/tmp/bdisk'), '/var/tmp/chroot/' + arch) |  | ||||||
|             prep.buildChroot(arch, '/var/tmp/chroot/' + arch, '/var/tmp/bdisk', conf['build']['basedir'] + '/extra') |  | ||||||
|             prep.prepChroot(conf['build']['basedir'] + '/extra/templates', '/var/tmp/chroot/' + arch, conf['bdisk'], arch) |  | ||||||
|             bchroot.chroot('/var/tmp/chroot/' + arch, 'bdisk.square-r00t.net') |  | ||||||
|             bchroot.chrootUnmount('/var/tmp/chroot/' + arch) |  | ||||||
|  | |||||||
							
								
								
									
bdisk/build.py (new executable file, 112 lines)
									
								
| @@ -0,0 +1,112 @@ | |||||||
|  | import os | ||||||
|  | import tarfile | ||||||
|  | import shutil | ||||||
|  | import glob | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def chrootClean(build): | ||||||
|  |     chrootdir = build['chrootdir'] | ||||||
|  |     arch = build['arch'] | ||||||
|  |     backupdir = build['dlpath'] + '/' + 'bak' | ||||||
|  |     os.makedirs(backupdir, exist_ok = True) | ||||||
|  |     ## Save some stuff that needs to be retained. | ||||||
|  |     # Tar up the local pacman database so it survives the cleanup below. | ||||||
|  |     for a in arch: | ||||||
|  |         os.makedirs(chrootdir + '/root.' + a + '/usr/local/pacman', exist_ok = True) | ||||||
|  |         tarball = chrootdir + '/root.' + a + '/usr/local/pacman/pacman.db.tar.xz' | ||||||
|  |         dbdir = chrootdir + '/root.' + a + '/var/lib/pacman/local' | ||||||
|  |         print("Now cleaning {0}/root.{1}. Please wait...".format(chrootdir, a)) | ||||||
|  |         if os.path.isfile(tarball): | ||||||
|  |             os.remove(tarball) | ||||||
|  |         with tarfile.open(name = tarball, mode = 'w:xz') as tar:  # if this balks, try x:xz | ||||||
|  |             tar.add(dbdir, arcname = os.path.basename(dbdir)) | ||||||
|  |         # Cut out the fat | ||||||
|  |         # The following are intended as "overrides" of the paths we'll be deleting. | ||||||
|  |         backup = {} | ||||||
|  |         backup['dirs'] = ['/var/lib/pacman/local'] | ||||||
|  |         backup['files'] = ['/usr/share/locale/locale.alias', | ||||||
|  |                         '/usr/share/zoneinfo/EST5EDT', | ||||||
|  |                         '/usr/share/zoneinfo/UTC', | ||||||
|  |                         '/usr/share/locale/en', | ||||||
|  |                         '/usr/share/locale/en_US', | ||||||
|  |                         '/usr/share/locale/en_GB'] | ||||||
|  |         # And these are what we remove. | ||||||
|  |         delete = {} | ||||||
|  |         delete['dirs'] = ['/usr/share/locale/*', | ||||||
|  |                         '/var/cache/pacman/*', | ||||||
|  |                         '/var/cache/pkgfile/*', | ||||||
|  |                         '/var/cache/apacman/pkg/*', | ||||||
|  |                         '/var/lib/pacman/*', | ||||||
|  |                         '/var/abs/local/yaourtbuild/*', | ||||||
|  |                         '/usr/share/zoneinfo', | ||||||
|  |                         '/root/.gnupg', | ||||||
|  |                         '/tmp/*', | ||||||
|  |                         '/var/tmp/*', | ||||||
|  |                         '/var/abs/*', | ||||||
|  |                         '/run/*', | ||||||
|  |                         '/boot/*', | ||||||
|  |                         '/usr/src/*', | ||||||
|  |                         '/var/log/*', | ||||||
|  |                         '/.git'] | ||||||
|  |         delete['files'] = ['/root/.bash_history', | ||||||
|  |                         '/root/apacman*', | ||||||
|  |                         '/root/iso.pkgs*', | ||||||
|  |                         '/root/packages.*', | ||||||
|  |                         '/root/pre-build.sh', | ||||||
|  |                         '/root/.viminfo', | ||||||
|  |                         '/root/.bashrc'] | ||||||
|  |         # First we back up files. We don't need to create backup['dirs'] | ||||||
|  |         # since they should be empty. If not, they go in backup['files']. | ||||||
|  |         chroot_root = chrootdir + '/root.' + a | ||||||
|  |         for f in backup['files']: | ||||||
|  |             src_base = chroot_root + f | ||||||
|  |             # os.walk() yields nothing for a plain file, so copy those directly | ||||||
|  |             if os.path.isfile(src_base): | ||||||
|  |                 dst_path = backupdir + '/root.' + a + f | ||||||
|  |                 os.makedirs(os.path.dirname(dst_path), exist_ok = True) | ||||||
|  |                 shutil.copy2(src_base, dst_path) | ||||||
|  |                 continue | ||||||
|  |             for root, dirs, files in os.walk(src_base): | ||||||
|  |                 dst_root = os.path.join(backupdir + '/root.' + a, os.path.relpath(root, chroot_root)) | ||||||
|  |                 os.makedirs(dst_root, exist_ok = True) | ||||||
|  |                 for item in files: | ||||||
|  |                     src_path = os.path.join(root, item) | ||||||
|  |                     dst_path = os.path.join(dst_root, item) | ||||||
|  |                     if not os.path.exists(dst_path) or os.stat(src_path).st_mtime > os.stat(dst_path).st_mtime: | ||||||
|  |                         shutil.copy2(src_path, dst_path) | ||||||
|  |         # Now we delete the above. | ||||||
|  |         for f in delete['files']: | ||||||
|  |             for x in glob.glob(chrootdir + '/root.' + a + f): | ||||||
|  |                 os.remove(x) | ||||||
|  |         for d in delete['dirs']: | ||||||
|  |             for x in glob.glob(chrootdir + '/root.' + a + d): | ||||||
|  |                 # globs like /tmp/* can match plain files, which rmtree() won't take | ||||||
|  |                 if os.path.isdir(x) and not os.path.islink(x): | ||||||
|  |                     shutil.rmtree(x) | ||||||
|  |                 else: | ||||||
|  |                     os.remove(x) | ||||||
|  |         # And restore the dirs/files | ||||||
|  |         for d in backup['dirs']: | ||||||
|  |             os.makedirs(chrootdir + '/root.' + a + d, exist_ok = True) | ||||||
|  |         # walk the backup tree once and copy everything back into the chroot | ||||||
|  |         bak_root = backupdir + '/root.' + a | ||||||
|  |         for root, dirs, files in os.walk(bak_root): | ||||||
|  |             dst_root = os.path.join(chroot_root, os.path.relpath(root, bak_root)) | ||||||
|  |             os.makedirs(dst_root, exist_ok = True) | ||||||
|  |             for item in files: | ||||||
|  |                 src_path = os.path.join(root, item) | ||||||
|  |                 dst_path = os.path.join(dst_root, item) | ||||||
|  |                 if not os.path.exists(dst_path) or os.stat(src_path).st_mtime > os.stat(dst_path).st_mtime: | ||||||
|  |                     shutil.copy2(src_path, dst_path) | ||||||
|  |         #shutil.rmtree(backupdir) | ||||||
|  | 
 | ||||||
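(Since chrootClean() stashes the local pacman database as a tarball inside the image, restoring it on the booted live system would be the inverse; a sketch, and where such a hook would actually run is not decided in this commit:)

    import tarfile

    # hypothetical restore step on the live system: the 'local' dir inside the
    # tarball lands back at /var/lib/pacman/local
    with tarfile.open('/usr/local/pacman/pacman.db.tar.xz', 'r:xz') as tar:
        tar.extractall(path='/var/lib/pacman')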
|  | def genImg(): | ||||||
|  |     pass | ||||||
|  | 
 | ||||||
|  | def genUEFI(): | ||||||
|  |     pass | ||||||
|  | 
 | ||||||
|  | def genISO(): | ||||||
|  |     pass | ||||||
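(These stubs track steps 9-11 of the plan in main.py. genImg() will presumably boil down to a mksquashfs run per cleaned chroot; a rough sketch under that assumption, with illustrative image names and compression flags:)

    import subprocess

    def genImg(build):
        # hypothetical: squash each per-arch chroot into a root filesystem image
        chrootdir = build['chrootdir']
        for a in build['arch']:
            sfs = chrootdir + '/airootfs.' + a + '.sfs'
            subprocess.call(['mksquashfs', chrootdir + '/root.' + a, sfs,
                             '-noappend', '-comp', 'xz'])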
| @@ -17,6 +17,10 @@ def getBits(): | |||||||
|     bits = list(platform.architecture())[0] |     bits = list(platform.architecture())[0] | ||||||
|     return(bits) |     return(bits) | ||||||
| 
 | 
 | ||||||
|  | def getHostname(): | ||||||
|  |     hostname = platform.node() | ||||||
|  |     return(hostname) | ||||||
|  | 
 | ||||||
| def getConfig(conf_file='/etc/bdisk/build.ini'): | def getConfig(conf_file='/etc/bdisk/build.ini'): | ||||||
|     conf = False |     conf = False | ||||||
|     # define some default conf paths in case we're installed by |     # define some default conf paths in case we're installed by | ||||||
| @@ -55,12 +59,32 @@ def parseConfig(conf): | |||||||
|     # Convert the booleans to pythonic booleans in the dict... |     # Convert the booleans to pythonic booleans in the dict... | ||||||
|     config_dict['bdisk']['user'] = config['bdisk'].getboolean('user') |     config_dict['bdisk']['user'] = config['bdisk'].getboolean('user') | ||||||
|     config_dict['build']['i_am_a_racecar'] = config['build'].getboolean('i_am_a_racecar') |     config_dict['build']['i_am_a_racecar'] = config['build'].getboolean('i_am_a_racecar') | ||||||
|     config_dict['build']['multiarch'] = config['build'].getboolean('multiarch') |     config_dict['build']['multiarch'] = (config_dict['build']['multiarch']).lower() | ||||||
|     for i in ('http', 'tftp', 'rsync', 'git'): |     for i in ('http', 'tftp', 'rsync', 'git'): | ||||||
|         config_dict['sync'][i] = config['sync'].getboolean(i) |         config_dict['sync'][i] = config['sync'].getboolean(i) | ||||||
|     config_dict['ipxe']['iso'] = config['ipxe'].getboolean('iso') |     config_dict['ipxe']['iso'] = config['ipxe'].getboolean('iso') | ||||||
|     config_dict['ipxe']['usb'] = config['ipxe'].getboolean('usb') |     config_dict['ipxe']['usb'] = config['ipxe'].getboolean('usb') | ||||||
|  |     # and build a list of arch(es) we want to build | ||||||
|  |     # note: multiarch = no/false still builds both arches, just as separate images | ||||||
|  |     if config_dict['build']['multiarch'] in ('', 'yes', 'true', '1', 'no', 'false', '0'): | ||||||
|  |         config_dict['build']['arch'] = ['x86_64','i686'] | ||||||
|  |     elif config_dict['build']['multiarch'] == 'x86_64': | ||||||
|  |         config_dict['build']['arch'] = ['x86_64'] | ||||||
|  |     elif config_dict['build']['multiarch'] == 'i686': | ||||||
|  |         config_dict['build']['arch'] = ['i686'] | ||||||
|  |     else: | ||||||
|  |         exit(('ERROR: {0} is not a valid value. Check your configuration.').format( | ||||||
|  |                                 config_dict['build']['multiarch'])) | ||||||
|     ## VALIDATORS ## |     ## VALIDATORS ## | ||||||
|  |     # Validate bootstrap mirror | ||||||
|  |     if (validators.domain(config_dict['build']['mirror']) or validators.ipv4( | ||||||
|  |                                 config_dict['build']['mirror']) or validators.ipv6( | ||||||
|  |                                 config_dict['build']['mirror'])): | ||||||
|  |         try: | ||||||
|  |             getaddrinfo(config_dict['build']['mirror'], None) | ||||||
|  |         except OSError: | ||||||
|  |             exit(('ERROR: {0} does not resolve and cannot be used as a ' +  | ||||||
|  |                 'mirror for the bootstrap tarballs. Check your configuration.').format( | ||||||
|  |                     config_dict['build']['mirror'])) | ||||||
|     # Are we rsyncing? If so, validate the rsync host. |     # Are we rsyncing? If so, validate the rsync host. | ||||||
|     # Works for IP address too. It does NOT check to see if we can |     # Works for IP address too. It does NOT check to see if we can | ||||||
|     # actually *rsync* to it; that'll come later. |     # actually *rsync* to it; that'll come later. | ||||||
| @@ -89,7 +113,7 @@ def parseConfig(conf): | |||||||
|                 "Check your configuration.").format(config_dict['build']['basedir'])) |                 "Check your configuration.").format(config_dict['build']['basedir'])) | ||||||
|     # Make dirs if they don't exist |     # Make dirs if they don't exist | ||||||
|     for d in ('archboot', 'isodir', 'mountpt', 'srcdir', 'tempdir'): |     for d in ('archboot', 'isodir', 'mountpt', 'srcdir', 'tempdir'): | ||||||
|         os.makedirs(config_dict['build'][d], exists_ok = True) |         os.makedirs(config_dict['build'][d], exist_ok = True) | ||||||
|     # Make dirs for sync staging if we need to |     # Make dirs for sync staging if we need to | ||||||
|     for x in ('http', 'tftp'): |     for x in ('http', 'tftp'): | ||||||
|         if config_dict['sync'][x]: |         if config_dict['sync'][x]: | ||||||
|  | |||||||
							
								
								
									
bdisk/prep.py (205 lines changed)
									
									
									
									
									
								
| @@ -10,107 +10,115 @@ import git | |||||||
| import jinja2 | import jinja2 | ||||||
| import datetime | import datetime | ||||||
| from urllib.request import urlopen | from urllib.request import urlopen | ||||||
|  | import host  # bdisk.host | ||||||
| def archChk(arch): |  | ||||||
|     if arch in ['i686', 'x86_64']: |  | ||||||
|         return(arch) |  | ||||||
|     else: |  | ||||||
|         exit("{0} is not a valid architecture. Must be one of i686 or x86_64.".format(arch)) |  | ||||||
| 
 | 
 | ||||||
| def dirChk(config_dict): | def dirChk(config_dict): | ||||||
|     # Make dirs if they don't exist |     # Make dirs if they don't exist | ||||||
|     for d in ('archboot', 'isodir', 'mountpt', 'srcdir', 'tempdir'): |     for d in ('archboot', 'isodir', 'mountpt', 'srcdir', 'tempdir'): | ||||||
|         os.makedirs(config_dict['build'][d], exists_ok = True) |         os.makedirs(config_dict['build'][d], exist_ok = True) | ||||||
|     # Make dirs for sync staging if we need to |     # Make dirs for sync staging if we need to | ||||||
|     for x in ('http', 'tftp'): |     for x in ('http', 'tftp'): | ||||||
|         if config_dict['sync'][x]: |         if config_dict['sync'][x]: | ||||||
|             os.makedirs(config_dict[x]['path'], exist_ok = True) |             os.makedirs(config_dict[x]['path'], exist_ok = True) | ||||||
| 
 | 
 | ||||||
| def downloadTarball(arch, dlpath): | def downloadTarball(build): | ||||||
|     # arch - should be i686 or x86_64 |     dlpath = build['dlpath'] | ||||||
|  |     arch = build['arch'] | ||||||
|  |     #mirror = 'http://mirrors.kernel.org/archlinux' | ||||||
|  |     mirror = build['mirrorproto'] + '://' + build['mirror'] | ||||||
|  |     rlsdir = mirror + build['mirrorpath'] | ||||||
|  |     sha_in = urlopen(mirror + build['mirrorchksum']) | ||||||
|     # returns path/filename e.g. /some/path/to/file.tar.gz |     # returns path/filename e.g. /some/path/to/file.tar.gz | ||||||
|     # we use .gnupg since we'll need it later. |     # we use .gnupg since we'll need it later. | ||||||
|     archChk(arch) |     os.makedirs(dlpath + '/.gnupg', exist_ok = True) | ||||||
|     try: |     tarball_path = {} | ||||||
|         os.makedirs(dlpath + '/.gnupg') |     for x in arch: | ||||||
|     except OSError as exception: |         tarball_path[x] = dlpath + '/.latest.' + x + '.tar' | ||||||
|         if exception.errno != errno.EEXIST: |  | ||||||
|             raise |  | ||||||
|     tarball_path = dlpath + '/.latest.' + arch + '.tar.gz' |  | ||||||
|     #mirror = 'http://mirrors.kernel.org/archlinux' |  | ||||||
|     mirror = 'https://mirror.us.leaseweb.net/archlinux' |  | ||||||
|     rlsdir = mirror + '/iso/latest' |  | ||||||
|     sha_in = urlopen(rlsdir + '/sha1sums.txt') |  | ||||||
|     sha1sums = sha_in.read() |     sha1sums = sha_in.read() | ||||||
|     sha_in.close() |     sha_in.close() | ||||||
|     sha1_list = sha1sums.decode("utf-8") |     sha_raw = sha1sums.decode("utf-8") | ||||||
|     sha_list = list(filter(None, sha1_list.split('\n'))) |     sha_list = list(filter(None, sha_raw.split('\n'))) | ||||||
|     sha_dict = {x.split()[1]: x.split()[0] for x in sha_list} |     sha_dict = {x.split()[1]: x.split()[0] for x in sha_list} | ||||||
|     pattern = re.compile('^archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-' + arch + '\.tar\.gz$') |  | ||||||
|     tarball = [filename.group(0) for l in list(sha_dict.keys()) for filename in [pattern.search(l)] if filename][0] |  | ||||||
|     sha1 = sha_dict[tarball] |  | ||||||
|     # all that lousy work just to get a sha1 sum. okay. so. |     # all that lousy work just to get a sha1 sum. okay. so. | ||||||
|     if os.path.isfile(tarball_path): |     if build['mirrorgpgsig'] != '': | ||||||
|         pass |  | ||||||
|     else: |  | ||||||
|         # fetch the tarball... |  | ||||||
|         print("Fetching the tarball for {0} architecture, please wait...".format(arch)) |  | ||||||
|         tarball_dl = urlopen(rlsdir + tarball) |  | ||||||
|         with open(dlpath + '/latest.' + arch + '.tar.gz', 'wb') as f: |  | ||||||
|             f.write(tarball_dl) |  | ||||||
|         tarball_dl.close() |  | ||||||
|     tarball_hash = hashlib.sha1(open(tarball_path, 'rb').read()).hexdigest() |  | ||||||
|     if tarball_hash != sha1: |  | ||||||
|         exit("There was a failure fetching the tarball and the wrong version exists on the filesystem.\nPlease try again later.") |  | ||||||
|     else: |  | ||||||
|         # okay, so the sha1 matches. let's verify the signature. |  | ||||||
|         # we don't want to futz with the user's normal gpg. |         # we don't want to futz with the user's normal gpg. | ||||||
|         gpg = gnupg.GPG(gnupghome = dlpath + '/.gnupg') |         gpg = gnupg.GPG(gnupghome = dlpath + '/.gnupg') | ||||||
|  |         print("\nNow generating a GPG key. Please wait...") | ||||||
|  |         # python-gnupg 0.3.9 spits this error in Arch. it's harmless, but ugly af. | ||||||
|  |         # TODO: remove this when the error doesn't happen anymore. | ||||||
|  |         print("If you see a \"ValueError: Unknown status message: 'KEY_CONSIDERED'\" error, it can be safely ignored.") | ||||||
|  |         print("If this is taking a VERY LONG time, try installing haveged and starting it. This can be " + | ||||||
|  |                         "done safely in parallel with the build process.\n") | ||||||
|         input_data = gpg.gen_key_input(name_email = 'tempuser@nodomain.tld', passphrase = 'placeholder_passphrase') |         input_data = gpg.gen_key_input(name_email = 'tempuser@nodomain.tld', passphrase = 'placeholder_passphrase') | ||||||
|         key = gpg.gen_key(input_data) |         key = gpg.gen_key(input_data) | ||||||
|         keyid = '7F2D434B9741E8AC' |         keyid = build['gpgkey'] | ||||||
|         gpg.recv_keys('pgp.mit.edu', keyid) |         gpg.recv_keys(build['gpgkeyserver'], keyid) | ||||||
|         gpg_sig = tarball + '.sig' |     for a in arch: | ||||||
|         sig_dl = urlopen(rlsdir + gpg_sig) |         pattern = re.compile(r'^.*' + a + r'\.tar(\.(gz|bz2|xz))?$') | ||||||
|         with open(tarball_path + '.sig', 'wb+') as f: |         tarball = [filename.group(0) for l in list(sha_dict.keys()) for filename in [pattern.search(l)] if filename][0] | ||||||
|             f.write(sig_dl) |         sha1 = sha_dict[tarball] | ||||||
|         sig_dl.close() |         if os.path.isfile(tarball_path[a]): | ||||||
|         sig = tarball_path + '.sig' |             pass | ||||||
|         tarball_data = open(tarball_path, 'rb') |         else: | ||||||
|         tarball_data_in = tarball_data.read() |             # fetch the tarball... | ||||||
|         gpg_verify = gpg.verify_data(sig, tarball_data_in) |             print("Fetching the tarball for {0} architecture, please wait...".format(a)) | ||||||
|         tarball_data.close() |             #dl_file = urllib.URLopener() | ||||||
|         if not gpg_verify: |             tarball_dl = urlopen(rlsdir + tarball) | ||||||
|             exit("There was a failure checking the signature of the release tarball. Please investigate.") |             with open(tarball_path[a], 'wb') as f: | ||||||
|         os.remove(sig) |                 f.write(tarball_dl.read()) | ||||||
|  |             tarball_dl.close() | ||||||
|  |         print(("Checking that the hash checksum for {0} matches {1}, please wait...").format( | ||||||
|  |                                 tarball_path[a], sha1)) | ||||||
|  |         tarball_hash = hashlib.sha1(open(tarball_path[a], 'rb').read()).hexdigest() | ||||||
|  |         if tarball_hash != sha1: | ||||||
|  |             exit(("There was a failure fetching {0} and the wrong version exists on the filesystem.\n" + | ||||||
|  |                                 "Please try again later.").format(tarball)) | ||||||
|  |         elif build['mirrorgpgsig'] != '': | ||||||
|  |             # okay, so the sha1 matches. let's verify the signature. | ||||||
|  |             if build['mirrorgpgsig'] == '.sig': | ||||||
|  |                 gpgsig_remote = rlsdir + tarball + '.sig' | ||||||
|  |             else: | ||||||
|  |                 gpgsig_remote = mirror + build['mirrorgpgsig'] | ||||||
|  |             gpg_sig = tarball + '.sig' | ||||||
|  |             sig_dl = urlopen(gpgsig_remote) | ||||||
|  |             sig = tarball_path[a] + '.sig' | ||||||
|  |             with open(sig, 'wb+') as f: | ||||||
|  |                 f.write(sig_dl.read()) | ||||||
|  |             sig_dl.close() | ||||||
|  |             tarball_data = open(tarball_path[a], 'rb') | ||||||
|  |             tarball_data_in = tarball_data.read() | ||||||
|  |             gpg_verify = gpg.verify_data(sig, tarball_data_in) | ||||||
|  |             tarball_data.close() | ||||||
|  |             if not gpg_verify: | ||||||
|  |                 exit("There was a failure checking {0} against {1}. Please investigate.".format( | ||||||
|  |                                  sig, tarball_path[a])) | ||||||
|  |             os.remove(sig) | ||||||
| 
 | 
 | ||||||
|     return(tarball_path) |     return(tarball_path) | ||||||
| 
 | 
 | ||||||
| def unpackTarball(tarball_path, chrootdir): | def unpackTarball(tarball_path, build): | ||||||
|  |     chrootdir = build['chrootdir'] | ||||||
|     # Make the dir if it doesn't exist |     # Make the dir if it doesn't exist | ||||||
|     try: |     shutil.rmtree(chrootdir, ignore_errors = True) | ||||||
|         os.makedirs(chrootdir) |     os.makedirs(chrootdir, exist_ok = True) | ||||||
|     except OSError as exception: |     print("Now extracting the tarball(s). Please wait...") | ||||||
|         if exception.errno != errno.EEXIST: |  | ||||||
|             raise |  | ||||||
|     # Open and extract the tarball |     # Open and extract the tarball | ||||||
|     tar = tarfile.open(tarball_path, 'r:gz') |     for a in build['arch']: | ||||||
|     tar.extractall(path = destdir) |         tar = tarfile.open(tarball_path[a], 'r:*')  # auto-detect compression; the tarball may be gz, bz2, or xz | ||||||
|     tar.close() |         tar.extractall(path = chrootdir) | ||||||
|     return(True) |         tar.close() | ||||||
|  |         print("Extraction for {0} finished.".format(tarball_path[a])) | ||||||
| 
 | 
 | ||||||
| def buildChroot(arch, chrootdir, dlpath, extradir): | def buildChroot(build): | ||||||
|     unpack_me = unpackTarball(downloadTarball(archChk(arch), dlpath), chrootdir) |     dlpath = build['dlpath'] | ||||||
|     if unpack_me: |     chrootdir = build['chrootdir'] | ||||||
|         pass |     arch = build['arch'] | ||||||
|     else: |     extradir = build['basedir'] + '/extra' | ||||||
|         exit("Something went wrong when trying to unpack the tarball.") |     unpack_me = unpackTarball(downloadTarball(build), build) | ||||||
| 
 |  | ||||||
|     print("The download and extraction has completed. Now prepping the chroot environment with some additional changes.") |  | ||||||
|     # build dict of lists of files and dirs from pre-build.d dir, do the same with arch-specific changes. |     # build dict of lists of files and dirs from pre-build.d dir, do the same with arch-specific changes. | ||||||
|     prebuild_overlay = {} |     prebuild_overlay = {} | ||||||
|     prebuild_arch_overlay = {} |     prebuild_arch_overlay = {} | ||||||
|     for x in ['i686', 'x86_64']: |     for x in arch: | ||||||
|         prebuild_arch_overlay[x] = {} |         prebuild_arch_overlay[x] = {} | ||||||
|         for y in ['files', 'dirs']: |         for y in ['files', 'dirs']: | ||||||
|             prebuild_overlay[y] = [] |             prebuild_overlay[y] = [] | ||||||
| @@ -129,40 +137,45 @@ def buildChroot(arch, chrootdir, dlpath, extradir): | |||||||
|         prebuild_overlay[x][:] = [y for y in prebuild_overlay[x] if not y.startswith(('x86_64','i686'))] |         prebuild_overlay[x][:] = [y for y in prebuild_overlay[x] if not y.startswith(('x86_64','i686'))] | ||||||
|     prebuild_overlay['dirs'].remove('/') |     prebuild_overlay['dirs'].remove('/') | ||||||
|     # create the dir structure. these should almost definitely be owned by root. |     # create the dir structure. these should almost definitely be owned by root. | ||||||
|     for dir in prebuild_overlay['dirs']: |     for a in arch: | ||||||
|         os.makedirs(chrootdir + '/' + dir, exist_ok = True) |         for dir in prebuild_overlay['dirs']: | ||||||
|         os.chown(chrootdir + '/' + dir, 0, 0) |             os.makedirs(chrootdir + '/root.' + a + '/' + dir, exist_ok = True) | ||||||
|     # and copy over the files. again, chown to root. |             os.chown(chrootdir + '/root.' + a + '/' + dir, 0, 0) | ||||||
|     for file in prebuild_overlay['files']: |         # and copy over the files. again, chown to root. | ||||||
|         shutil.copy2(extradir + '/pre-build.d/' + file, chrootdir + '/' + file) |         for file in prebuild_overlay['files']: | ||||||
|         os.chown(chrootdir + '/' + file, 0, 0) |             shutil.copy2(extradir + '/pre-build.d/' + file, chrootdir + '/root.' + a + '/' + file) | ||||||
|     # do the same for arch-specific stuff. |             os.chown(chrootdir + '/root.' + a + '/' + file, 0, 0) | ||||||
|     for dir in prebuild_arch_overlay[arch]['dirs']: |         # do the same for arch-specific stuff. | ||||||
|         os.makedirs(chrootdir + '/' + dir, exist_ok = True) |         for dir in prebuild_arch_overlay[a]['dirs']: | ||||||
|         os.chown(chrootdir + '/' + dir, 0, 0) |             os.makedirs(chrootdir + '/root.' + a + '/' + dir, exist_ok = True) | ||||||
|     for file in prebuild_arch_overlay[arch]['files']: |             os.chown(chrootdir + '/root.' + a + '/' + dir, 0, 0) | ||||||
|         shutil.copy2(extradir + '/pre-build.d/' + arch + '/' + file, chrootdir + '/' + file) |         for file in prebuild_arch_overlay[a]['files']: | ||||||
|         os.chown(chrootdir + '/' + file, 0, 0) |             shutil.copy2(extradir + '/pre-build.d/' + a + '/' + file, chrootdir + '/root.' + a + '/' + file) | ||||||
|     return(chrootdir) |             os.chown(chrootdir + '/root.' + a + '/' + file, 0, 0) | ||||||
| 
 | 
 | ||||||
| def prepChroot(templates_dir, chrootdir, bdisk, arch): | def prepChroot(templates_dir, build, bdisk): | ||||||
|  |     chrootdir = build['chrootdir'] | ||||||
|  |     arch = build['arch'] | ||||||
|  |     bdisk_repo_dir = build['basedir'] | ||||||
|     build = {} |     build = {} | ||||||
|     # let's prep some variables to write out the version info.txt |     # let's prep some variables to write out the version info.txt | ||||||
|     # get the git tag and short commit hash |     # get the git tag and short commit hash | ||||||
|     repo = git.Repo(bdisk['dir']) |     repo = git.Repo(bdisk_repo_dir) | ||||||
|     refs = repo.git.describe(repo.head.commit).split('-') |     refs = repo.git.describe(repo.head.commit).split('-') | ||||||
|     build['ver'] = refs[0] + '-' + refs[2] |     build['ver'] = refs[0] + '-' + refs[2] | ||||||
|     # and these should be passed in from the args, for the most part. |     # and these should be passed in from the args, for the most part. | ||||||
|     build['name'] = bdisk['name'] |     build['name'] = bdisk['name'] | ||||||
|     build['time'] = datetime.datetime.utcnow().strftime("%a %b %d %H:%M:%S UTC %Y") |     build['time'] = datetime.datetime.utcnow().strftime("%a %b %d %H:%M:%S UTC %Y") | ||||||
|     build['host'] = bdisk['hostname'] |     hostname = host.getHostname() | ||||||
|     build['user'] = os.environ['USER'] |     build['user'] = os.environ['USER'] | ||||||
|     if os.environ['SUDO_USER']: |     if 'SUDO_USER' in os.environ: | ||||||
|         build['realuser'] = os.environ['SUDO_USER'] |         build['realuser'] = os.environ['SUDO_USER'] | ||||||
|     # and now that we have that dict, let's write out the VERSION_INFO.txt file. |     # and now that we have that dict, let's write out the VERSION_INFO.txt file. | ||||||
|     env = jinja2.Environment(loader=FileSystemLoader(templates_dir)) |     loader = jinja2.FileSystemLoader(templates_dir) | ||||||
|  |     env = jinja2.Environment(loader = loader) | ||||||
|     tpl = env.get_template('VERSION_INFO.txt.j2') |     tpl = env.get_template('VERSION_INFO.txt.j2') | ||||||
|     tpl_out = template.render(build = build) |     tpl_out = tpl.render(build = build, hostname = hostname) | ||||||
|     with open(chrootdir + '/root/VERSION_INFO.txt', "wb+") as f: |     for a in arch: | ||||||
|         fh.write(tpl_out) |         with open(chrootdir + '/root.' + a + '/root/VERSION_INFO.txt', "w+") as f: | ||||||
|  |             f.write(tpl_out) | ||||||
|     return(build) |     return(build) | ||||||
|  | |||||||
| @@ -123,6 +123,72 @@ password = | |||||||
| #---------------------------------------------------------# | #---------------------------------------------------------# | ||||||
| [build] | [build] | ||||||
| 
 | 
 | ||||||
|  | ; What is the mirror for your bootstrap tarball? | ||||||
|  | ; It is *highly* recommended you use an Arch Linux tarball | ||||||
|  | ; as the build process is highly specialized to this. | ||||||
|  | ; 0.) No whitespace | ||||||
|  | ; 1.) Must be accessible remotely (no local file paths) | ||||||
|  | mirror = mirror.us.leaseweb.net | ||||||
|  | 
 | ||||||
|  | ; What is the protocol for the bootstrap mirror? | ||||||
|  | ; 0.) Must be one of: | ||||||
|  | ; 	http, https, ftp | ||||||
|  | mirrorproto = https | ||||||
|  | 
 | ||||||
|  | ; What is the path to the tarball directory? | ||||||
|  | ; 0.) Must be a complete path | ||||||
|  | ; 	(e.g. /dir1/subdir1/subdir2/) | ||||||
|  | ; 1.) No whitespace | ||||||
|  | mirrorpath = /archlinux/iso/latest/ | ||||||
|  | 
 | ||||||
|  | ; What is the filename for the tarball found in the above? | ||||||
|  | ; If left blank, we will use the sha1 checksum file to try | ||||||
|  | ; to guess the most recent file. | ||||||
|  | mirrorfile =  | ||||||
|  | 
 | ||||||
|  | ; What is the path to a sha1 checksum file? | ||||||
|  | ; 0.) No whitespace | ||||||
|  | ; 1.) Must be the full path | ||||||
|  | ; 2.) Don't include the mirror domain or protocol | ||||||
|  | mirrorchksum = ${mirrorpath}sha1sums.txt | ||||||
|  | 
 | ||||||
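(A note on the ${...} references used throughout this file: they depend on configparser's ExtendedInterpolation, so getConfig()/parseConfig() in host.py presumably construct the parser along these lines; a sketch, not the actual code from this commit:)

    import configparser

    config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
    config.read('/etc/bdisk/build.ini')
    # ${mirrorpath} expands from the same [build] section:
    print(config['build']['mirrorchksum'])  # -> /archlinux/iso/latest/sha1sums.txt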
|  | ; Optional GPG checking. | ||||||
|  | ; If the file has a GPG signature file, | ||||||
|  | ; we can use it for extra checking. | ||||||
|  | ; If it's blank, GPG checking will be disabled. | ||||||
|  | ; If you specify just '.sig' (or use the default | ||||||
|  | ; and don't actually specify a mirrorfile), | ||||||
|  | ; we'll try to guess based on the file from the sha1 | ||||||
|  | ; checksums. | ||||||
|  | ; 0.) No whitespace (if specified) | ||||||
|  | ; 1.) Must be the full path | ||||||
|  | ; 2.) Don't include the mirror domain or protocol | ||||||
|  | mirrorgpgsig = ${mirrorfile}.sig | ||||||
|  | 
 | ||||||
|  | ; What is a valid key ID that should be used to | ||||||
|  | ; verify the tarballs? | ||||||
|  | ; 0.) Only used if mirrorgpgsig is set | ||||||
|  | ; 1.) Should be in the "shortform" | ||||||
|  | ;	(e.g. 7F2D434B9741E8AC) | ||||||
|  | gpgkey = 7F2D434B9741E8AC | ||||||
|  | 
 | ||||||
|  | ; What is a valid keyserver we should use | ||||||
|  | ; to fetch gpgkey? | ||||||
|  | ; 0.) Only used if mirrorgpgsig is set | ||||||
|  | ; 1.) The default is probably fine. | ||||||
|  | gpgkeyserver = pgp.mit.edu | ||||||
|  | 
 | ||||||
|  | ; Where should we save the bootstrap tarballs? | ||||||
|  | ; 0.) No whitespace | ||||||
|  | ; 1.) Will be created if it doesn't exist | ||||||
|  | dlpath = /var/tmp/${bdisk:uxname} | ||||||
|  | 
 | ||||||
|  | ; Where should the bootstrap tarballs extract to and the | ||||||
|  | ; chroots be built? | ||||||
|  | ; 0.) No whitespace | ||||||
|  | ; 1.) Will be created if it doesn't exist | ||||||
|  | chrootdir = /var/tmp/chroots | ||||||
|  | 
 | ||||||
| ; Where is the base of the BDisk project located? | ; Where is the base of the BDisk project located? | ||||||
| ; In other words, if you cloned BDisk from git, | ; In other words, if you cloned BDisk from git, | ||||||
| ; what is BDisk's working tree directory? | ; what is BDisk's working tree directory? | ||||||
| @@ -164,9 +230,10 @@ mountpt = /mnt/${bdisk:uxname} | |||||||
| ; Should we build a multiarch image? That is to say, the | ; Should we build a multiarch image? That is to say, the | ||||||
| ; same ISO file can be used for both i686 and x86_64. | ; same ISO file can be used for both i686 and x86_64. | ||||||
| ; 0.) Only accepts (case-insensitive): | ; 0.) Only accepts (case-insensitive): | ||||||
| ;	yes|no | ;	yes/true (build both i686, x86_64 in same image) | ||||||
| ;	true|false | ;	no/false (build separate images, both arch's) | ||||||
| ;	1|0 | ;	i686	 (ONLY build i686 architecture) | ||||||
|  | ;	x86_64	 (ONLY build x86_64 architecture) | ||||||
| ; If it is undefined, it is assumed to be no. | ; If it is undefined, it is assumed to be no. | ||||||
| multiarch = yes | multiarch = yes | ||||||
| 
 | 
 | ||||||
| @@ -181,10 +248,9 @@ multiarch = yes | |||||||
| ipxe = yes | ipxe = yes | ||||||
| 
 | 
 | ||||||
| ; This option should only be enabled if you are on a fairly | ; This option should only be enabled if you are on a fairly | ||||||
| ;	powerful, multicore system with plenty of RAM. | ; powerful, multicore system with plenty of RAM. It will | ||||||
| ; It will speed the build process along, but will have | ; speed the build process along, but will have some | ||||||
| ;	some seriously adverse effects if your system | ; seriously adverse effects if your system can't handle it. | ||||||
| ;	can't handle it. |  | ||||||
| ; Most modern systems should be fine with leaving it enabled. | ; Most modern systems should be fine with leaving it enabled. | ||||||
| ; 0.) Only accepts (case-insensitive): | ; 0.) Only accepts (case-insensitive): | ||||||
| ;	yes|no | ;	yes|no | ||||||
| @@ -213,8 +279,8 @@ http = yes | |||||||
| 
 | 
 | ||||||
| ; Should we generate/prepare TFTP files? | ; Should we generate/prepare TFTP files? | ||||||
| ; This is mostly only useful if you plan on using more | ; This is mostly only useful if you plan on using more | ||||||
| ;	traditional (non-iPXE) setups and regular PXE | ; traditional (non-iPXE) setups and regular PXE bootstrapping | ||||||
| ;	bootstrapping into iPXE. | ; into iPXE. | ||||||
| ; 0.) Only accepts (case-insensitive): | ; 0.) Only accepts (case-insensitive): | ||||||
| ;	yes|no | ;	yes|no | ||||||
| ;	true|false | ;	true|false | ||||||
| @@ -223,19 +289,18 @@ http = yes | |||||||
| tftp = yes | tftp = yes | ||||||
| 
 | 
 | ||||||
| ; Enable automatic Git pushing for any changes done to the | ; Enable automatic Git pushing for any changes done to the | ||||||
| ;	project itself? | ; project itself? If you don't have upstream write access, | ||||||
| ; If you don't have upstream write access, you'll want to | ; you'll want to set this to False. | ||||||
| ;	set this to False. |  | ||||||
| ; 0.) Only accepts (case-insensitive): | ; 0.) Only accepts (case-insensitive): | ||||||
| ;	yes|no | ;	yes|no | ||||||
| ;	true|false | ;	true|false | ||||||
| ;	1|0 | ;	1|0 | ||||||
| ; If it is undefined, it is assumed to be no. | ; If it is undefined, it is assumed to be no. | ||||||
| git = yes | git = no | ||||||
| 
 | 
 | ||||||
| ; Enable rsync pushing for the ISO (and other files, if | ; Enable rsync pushing for the ISO (and other files, if | ||||||
| ; you choose- useful for iPXE over HTTP(S)). | ; you choose- useful for iPXE over HTTP(S)). | ||||||
| rsync = yes | rsync = no | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| #---------------------------------------------------------# | #---------------------------------------------------------# | ||||||
| @@ -275,8 +340,8 @@ group = http | |||||||
| path = ${build:basedir}/tftpboot | path = ${build:basedir}/tftpboot | ||||||
| 
 | 
 | ||||||
| ; What user and group, if applicable, should the TFTP files | ; What user and group, if applicable, should the TFTP files | ||||||
| ;	be owned as? This is most likely going to be either | ; be owned as? This is most likely going to be either 'tftp' | ||||||
| ;	'tftp' or 'root'. | ; or 'root'. | ||||||
| ; 0.) No whitespace | ; 0.) No whitespace | ||||||
| ; 1.) User must exist on system | ; 1.) User must exist on system | ||||||
| ; 2.) If sync:tftpdir is blank, they will not be used | ; 2.) If sync:tftpdir is blank, they will not be used | ||||||
| @@ -291,9 +356,9 @@ group = root | |||||||
| [ipxe] | [ipxe] | ||||||
| 
 | 
 | ||||||
| ; Build a "mini-ISO"; that is, an ISO file that can be used | ; Build a "mini-ISO"; that is, an ISO file that can be used | ||||||
| ;	to bootstrap an iPXE environment (so you don't need | ; to bootstrap an iPXE environment (so you don't need to set | ||||||
| ;	to set up a traditional PXE environment on your LAN). | ; up a traditional PXE environment on your LAN). We'll still | ||||||
| ;We'll still build a full standalone ISO no matter what. | ; build a full standalone ISO no matter what. | ||||||
| ; 0.) Only accepts (case-insensitive): | ; 0.) Only accepts (case-insensitive): | ||||||
| ;	yes|no | ;	yes|no | ||||||
| ;	true|false | ;	true|false | ||||||
| @@ -301,13 +366,12 @@ group = root | |||||||
| ; If it is undefined, it is assumed to be no. | ; If it is undefined, it is assumed to be no. | ||||||
| iso = yes | iso = yes | ||||||
| 
 | 
 | ||||||
| ; Build a "mini-USB" image? Same concept as the ISO file | ; Build a "mini-USB" image? Same concept as the ISO file but | ||||||
| ;	but this can be dd'd onto a USB thumbdrive for the | ; this can be dd'd onto a USB thumbdrive for the same effect. | ||||||
| ;	same effect. |  | ||||||
| usb = yes | usb = yes | ||||||
| 
 | 
 | ||||||
| ; What URI should iPXE's EMBED script use? | ; What URI should iPXE's EMBED script use? DO NOT USE A | ||||||
| ; DO NOT USE A ',' (comma); instead, replace it with: | ; ',' (comma); instead, replace it with: | ||||||
| ;	%%COMMA%% | ;	%%COMMA%% | ||||||
| ; If you require HTTP BASIC Authentication or HTTP Digest | ; If you require HTTP BASIC Authentication or HTTP Digest | ||||||
| ; Authentication (untested), you can format it via: | ; Authentication (untested), you can format it via: | ||||||
| @@ -315,7 +379,7 @@ usb = yes | |||||||
| ;	https://user:password@domain.tld/page.php | ;	https://user:password@domain.tld/page.php | ||||||
| ; | ; | ||||||
| ; This currently does not work for HTTPS with self-signed | ; This currently does not work for HTTPS with self-signed | ||||||
| ;	certificates. | ; certificates. | ||||||
| ; 0.) REQUIRED if iso and/or usb is set to True/yes/etc. | ; 0.) REQUIRED if iso and/or usb is set to True/yes/etc. | ||||||
| ; 1.) Must be a valid URI understood by minimal versions | ; 1.) Must be a valid URI understood by minimal versions | ||||||
| ;	of curl. | ;	of curl. | ||||||
| @@ -324,15 +388,15 @@ uri = https://bdisk.square-r00t.net | |||||||
| ; Path to the (root) CA certificate file iPXE should use. | ; Path to the (root) CA certificate file iPXE should use. | ||||||
| ; Note that you can use your own CA to sign existing certs. | ; Note that you can use your own CA to sign existing certs. | ||||||
| ; See http://ipxe.org/crypto for more info. This is handy if | ; See http://ipxe.org/crypto for more info. This is handy if | ||||||
| ;	you run a third-party/"Trusted" root-CA-signed | ; you run a third-party/"Trusted" root-CA-signed certificate | ||||||
| ;	certificate for the HTTPS target. | ; for the HTTPS target. | ||||||
| ; 0.) No whitespace | ; 0.) No whitespace | ||||||
| ; 1.) Must be in PEM/X509 format | ; 1.) Must be in PEM/X509 format | ||||||
| ; 2.) REQUIRED if iso and/or usb is set to True/yes/etc. | ; 2.) REQUIRED if iso and/or usb is set to True/yes/etc. | ||||||
| ; 3.) If specified, a matching key (ssl_cakey) MUST be | ; 3.) If specified, a matching key (ssl_cakey) MUST be | ||||||
| ;	specified | ;     specified | ||||||
| ; 4.) HOWEVER, if left blank, one will be automatically | ; 4.) HOWEVER, if left blank, one will be automatically | ||||||
| ;	generated | ;     generated | ||||||
| ssl_ca = | ssl_ca = | ||||||
| 
 | 
 | ||||||
| ; Path to the (root) CA key file iPXE should use. | ; Path to the (root) CA key file iPXE should use. | ||||||
|  | |||||||
| @@ -1,5 +1,8 @@ | |||||||
| #!/bin/bash | #!/bin/bash | ||||||
| 
 | 
 | ||||||
|  | # we need this fix before anything. | ||||||
|  | dirmngr </dev/null > /dev/null 2>&1 | ||||||
|  | 
 | ||||||
| # Import settings. | # Import settings. | ||||||
| if [[ -f /root/VARS.txt ]]; | if [[ -f /root/VARS.txt ]]; | ||||||
| then | then | ||||||
|  | |||||||
| @@ -1,5 +1,5 @@ | |||||||
| Version:        {{ build['ver'] }} | Version:        {{ build['ver'] }} | ||||||
| Build:          {{ build['name'] }} | Build:          {{ build['name'] }} | ||||||
| Time:           {{ build['time'] }} | Time:           {{ build['time'] }} | ||||||
| Machine:        {{ build['host'] }} | Machine:        {{ hostname }} | ||||||
| User:           {{ build['user'] }}{% if build['realuser'] is defined %} ({{ build['realuser'] }}){% endif %} | User:           {{ build['user'] }}{% if build['realuser'] is defined %} ({{ build['realuser'] }}){% endif %} | ||||||
|  | |||||||
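(For context, prepChroot() renders this template into each chroot's /root/VERSION_INFO.txt; with the build dict it assembles, the output looks roughly like the following, all values illustrative:)

    Version:        v3.00-a1b2c3d
    Build:          BDisk
    Time:           Sun Jun 26 20:42:17 UTC 2016
    Machine:        buildhost.example.net
    User:           root (someuser)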