stubbing out 4.x
This commit is contained in:
parent
aaf03db8bd
commit
c6a837d1fe
228
bdisk/bGPG.py
228
bdisk/bGPG.py
@ -1,228 +0,0 @@
|
|||||||
import os
|
|
||||||
from io import BytesIO
|
|
||||||
import subprocess
|
|
||||||
import datetime
|
|
||||||
import jinja2
|
|
||||||
import gpgme
|
|
||||||
import psutil
|
|
||||||
|
|
||||||
def _addKeyservers(gpghome, gpgkeyserver, pname):
    # Helper: append any keyservers missing from <gpghome>/dirmngr.conf.
    # Mode 'a+' creates the file if it doesn't exist yet (the original's
    # second copy of this logic opened 'r+', which raises on a freshly
    # generated GNUPGHOME with no dirmngr.conf). Writes in 'a' mode always
    # append, which is exactly what we want here.
    dirmgr = '{0}/dirmngr.conf'.format(gpghome)
    for s in gpgkeyserver:
        with open(dirmgr, 'a+') as f:
            f.seek(0)
            if not any(s in line for line in f):
                f.write("\n# Added by {0}.\nkeyserver {1}\n".format(pname, s))


def genGPG(conf):
    """
    Set up (or generate) the GPG signing context for the build.

    Ensures GNUPGHOME points at the configured (or default, or generated)
    homedir, registers any per-arch keyservers in dirmngr.conf, finds or
    generates a secret signing key, receives and locally signs the distro
    key(s), and exports our public key to <dlpath>/gpgkey.pub for the
    chroots. Returns the configured gpgme.Context with 'signers' set.
    """
    # https://media.readthedocs.org/pdf/pygpgme/latest/pygpgme.pdf
    build = conf['build']
    dlpath = build['dlpath']
    bdisk = conf['bdisk']
    gpghome = conf['gpg']['mygpghome']
    distkeys = []
    gpgkeyserver = []
    for a in conf['build']['arch']:
        keysrv = conf['src'][a]['gpgkeyserver']
        distkey = conf['src'][a]['gpgkey']
        if keysrv and (keysrv not in gpgkeyserver):
            gpgkeyserver.append(keysrv)
        if distkey and (distkey not in distkeys):
            distkeys.append(distkey)
    templates_dir = '{0}/extra/templates'.format(build['basedir'])
    mykey = False
    pkeys = []
    killStaleAgent(conf)
    if conf['gpg']['mygpgkey'] != '':
        mykey = conf['gpg']['mygpgkey']
        if gpghome == '':
            # Let's try the default.
            gpghome = '{0}/.gnupg'.format(os.path.expanduser("~"))
    else:
        # No key ID was specified.
        if gpghome == '':
            # We'll generate a key if we can't find one here.
            gpghome = build['dlpath'] + '/.gnupg'
    killStaleAgent(conf)
    os.environ['GNUPGHOME'] = gpghome
    gpg = gpgme.Context()
    # do we need to add a keyserver?
    if len(gpgkeyserver) != 0:
        _addKeyservers(gpghome, gpgkeyserver, bdisk['pname'])
    if mykey:
        try:
            pkeys.append(gpg.get_key(mykey, True))
        except gpgme.GpgmeError:  # was a bare except; only the key lookup can fail here
            exit('{0}: ERROR: You specified using {1} but we have no secret key for that ID!'.format(
                datetime.datetime.now(),
                mykey))
    else:
        for key in gpg.keylist(None, True):
            if key.can_sign:
                pkeys.append(key)
                break
    if len(pkeys) == 0:
        print("{0}: [GPG] Generating a GPG key...".format(datetime.datetime.now()))
        loader = jinja2.FileSystemLoader(templates_dir)
        env = jinja2.Environment(loader = loader)
        tpl = env.get_template('GPG.j2')
        tpl_out = tpl.render(build = build, bdisk = bdisk)
        privkey = gpg.get_key(gpg.genkey(tpl_out).fpr, True)
        pkeys.append(privkey)
        # do we need to add a keyserver? this is for the freshly-generated GNUPGHOME
        if len(gpgkeyserver) != 0:
            _addKeyservers(gpghome, gpgkeyserver, bdisk['pname'])
    gpg.signers = pkeys
    # Now we try to find and add the key for the base image.
    gpg.keylist_mode = gpgme.KEYLIST_MODE_EXTERN  # remote (keyserver)
    if len(distkeys) > 0:  # testing
        for k in distkeys:
            key = gpg.get_key(k)
            importkey = key.subkeys[0].fpr
            gpg.keylist_mode = gpgme.KEYLIST_MODE_LOCAL  # local keyring (default)
            # BUGFIX: this print used 'distkey', the stale loop variable left
            # over from the config scan above -- it should report 'k'.
            print('{0}: [GPG] Importing {1} and signing it for verification purposes...'.format(
                datetime.datetime.now(),
                k))
            cmd = ['/usr/bin/gpg',
                   '--recv-keys',
                   '--batch',
                   '--yes',
                   '0x{0}'.format(importkey)]
            # subprocess.DEVNULL avoids the handle leak of open(os.devnull, 'w').
            subprocess.call(cmd, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT)
            sigkeys = []
            for i in gpg.get_key(importkey).subkeys:
                sigkeys.append(i.fpr)
            cmd = ['/usr/bin/gpg',
                   '--batch',
                   '--yes',
                   '--lsign-key',
                   '0x{0}'.format(importkey)]
            subprocess.call(cmd, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT)
    # We need to expose this key to the chroots, too, so we need to export it.
    with open('{0}/gpgkey.pub'.format(dlpath), 'wb') as f:
        gpg.export(pkeys[0].subkeys[0].keyid, f)
    return(gpg)
|
|
||||||
|
|
||||||
def killStaleAgent(conf):
    """
    Kill off any stale gpg-agent/dirmngr processes we own that are rooted in
    the chroot or our GPG homedir. Probably not even needed, but good to have.
    """
    chrootdir = conf['build']['chrootdir']
    gpgpath = conf['gpg']['mygpghome']
    plst = []
    for p in psutil.process_iter():
        try:
            if (p.name() in ('gpg-agent', 'dirmngr') and p.uids()[0] == os.getuid()):
                pd = psutil.Process(p.pid).as_dict()
                cwd = pd.get('cwd')
                # BUGFIX: as_dict() reports cwd as None when access is denied
                # or the process is gone; the original crashed on .startswith.
                if cwd:
                    # NOTE(review): if gpgpath is '', startswith('') matches
                    # every process -- preserved; confirm intended.
                    for d in (chrootdir, gpgpath):
                        if cwd.startswith('{0}'.format(d)):
                            plst.append(p.pid)
        except psutil.NoSuchProcess:
            # Raced with process exit between iteration and inspection.
            continue
    for pid in plst:
        psutil.Process(pid).terminate()
|
|
||||||
|
|
||||||
def signIMG(path, conf):
    """
    Detach-sign 'path' twice: once ASCII-armored (path.asc) and once binary
    (path.sig). No-op unless build:sign is enabled in the config.
    """
    if not conf['build']['sign']:
        return
    # Do we want to kill off any stale gpg-agents? (So we spawn a new one)
    # Requires further testing.
    #killStaleAgent()
    gpg = conf['gpgobj']  # was fetched twice in the original
    print('{0}: [GPG] Signing {1}...'.format(
        datetime.datetime.now(),
        path))
    # May not be necessary; further testing necessary
    #if os.getenv('GPG_AGENT_INFO'):
    #    del os.environ['GPG_AGENT_INFO']
    # The two passes were near-identical copy/paste in the original;
    # collapsed into one loop over (armor, extension, description).
    for armor, ext, desc in ((True, 'asc', 'ASCII-armored signature'),
                             (False, 'sig', 'binary signature')):
        gpg.armor = armor
        sigbuf = BytesIO()
        # 'with' closes the data handle even if gpg.sign raises.
        with open(path, 'rb') as data_in:
            sig = gpg.sign(data_in, sigbuf, gpgme.SIG_MODE_DETACH)
        sigbuf.seek(0)
        with open('{0}.{1}'.format(path, ext), 'wb') as f:
            f.write(sigbuf.read())
        print('{0}: [GPG] Wrote {1}.{2} ({3}).'.format(
            datetime.datetime.now(),
            path,
            ext,
            desc))
|
|
||||||
|
|
||||||
def gpgVerify(sigfile, datafile, conf):
    """
    Verify detached signature 'sigfile' against 'datafile'.

    Prints a diagnostic for any signature with validity <= 1, then reports
    success if any signing fingerprint is present in our keyring. Returns
    True on verification, False otherwise.
    """
    gpg = conf['gpgobj']
    fullkeys = []
    print('{0}: [GPG] Verifying {1} with {2}...'.format(
        datetime.datetime.now(),
        datafile,
        sigfile))
    for k in gpg.keylist():
        fullkeys.append(k.subkeys[0].fpr)
    # Combined 'with' keeps both handles scoped to the verify call.
    with open(sigfile, 'rb') as s, open(datafile, 'rb') as f:
        sig = gpg.verify(s, f, None)
    for x in sig:
        if x.validity <= 1:
            if not x.validity_reason:
                reason = 'we require a signature trust of 2 or higher'
            else:
                reason = x.validity_reason
            print('{0}: [GPG] Key {1} failed to verify: {2}'.format(
                datetime.datetime.now(),
                x.fpr,
                reason))
    verified = False
    skeys = []
    # (removed the original's dead 'else: pass' on this loop)
    for k in sig:
        skeys.append(k.fpr)
        if k.fpr in fullkeys:
            verified = True
            break
    if verified:
        print('{0}: [GPG] {1} verified (success).'.format(
            datetime.datetime.now(),
            datafile))
    else:
        print('{0}: [GPG] {1} failed verification!'.format(
            datetime.datetime.now(),
            datafile))
    return(verified)
|
|
||||||
|
|
||||||
def delTempKeys(conf):
    """
    Delete temporary GPG keys from the keyring, then reap stale agents.

    Create a config option to delete these. It's handy to keep these keys,
    but I'd understand if people didn't want to use them.
    """
    gpg = conf['gpgobj']
    if conf['gpg']:
        doomed = []
        # NOTE(review): these read conf['gpgkey'] / conf['mygpghome'] at the
        # top level while the rest of the module reads them under conf['gpg'];
        # preserved as-is -- confirm against the config parser.
        if conf['gpgkey'] != '':
            doomed.append(gpg.get_key(conf['gpgkey']))
        if conf['mygpghome'] == '':
            # this is safe; we generated our own
            doomed.append(gpg.get_key(None, True))
        for key in doomed:
            gpg.delete(key)
    killStaleAgent(conf)
|
|
196
bdisk/bSSL.py
196
bdisk/bSSL.py
@ -1,196 +0,0 @@
|
|||||||
import OpenSSL
|
|
||||||
import os
|
|
||||||
import shutil
|
|
||||||
import datetime
|
|
||||||
import re
|
|
||||||
|
|
||||||
def verifyCert(cert, key, CA = None):
    """
    Verify a given certificate against a private key.

    Returns True if 'key' matches 'cert', False otherwise. 'CA' is accepted
    but unused (hopefully usable if/when PyOpenSSL supports verifying against
    a CA certificate).
    """
    chk = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
    chk.use_privatekey(key)
    chk.use_certificate(cert)
    try:
        chk.check_privatekey()
    except OpenSSL.SSL.Error:
        # BUGFIX: the original's exit() was placed AFTER the return and was
        # unreachable; report the mismatch instead, and return False so
        # callers (sslPKI) can regenerate the cert.
        print("{0}: {1} does not match {2}!".format(datetime.datetime.now(), key, cert))
        return(False)
    else:
        print("{0}: [SSL] Verified {1} against {2} successfully.".format(datetime.datetime.now(), key, cert))
        return(True)
    # This is disabled because there doesn't seem to currently be any way
    # to actually verify certificates against a given CA.
    #if CA:
    #    try:
    #        # magic stuff here
|
|
||||||
|
|
||||||
def sslCAKey(conf):
    """
    Load the CA private key from conf['ipxe']['ssl_cakey'] if it exists;
    otherwise generate a 4096-bit RSA key and write it there.
    Returns an OpenSSL PKey object.
    """
    # TODO: use path from conf, even if it doesn't exist?
    # if it does, read it into a pkey object
    keyfile = conf['ipxe']['ssl_cakey']
    if os.path.isfile(keyfile):
        try:
            # 'with' avoids the original's leaked file handle.
            with open(keyfile) as f:
                key = OpenSSL.crypto.load_privatekey(
                    OpenSSL.crypto.FILETYPE_PEM,
                    f.read())
        except OpenSSL.crypto.Error:  # was a bare except
            exit('{0}: ERROR: It seems that {1} is not a proper PEM-encoded SSL key.'.format(
                datetime.datetime.now(),
                keyfile))
    else:
        key = OpenSSL.crypto.PKey()
        print("{0}: [SSL] Generating SSL CA key...".format(datetime.datetime.now()))
        key.generate_key(OpenSSL.crypto.TYPE_RSA, 4096)
        with open(keyfile, 'wb') as f:
            f.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key))
    return(key)
|
|
||||||
|
|
||||||
def sslCA(conf, key = None):
    """
    Load the CA certificate from conf['ipxe']['ssl_ca'] if it exists;
    otherwise self-sign a new CA cert (valid ~10 years) with 'key' and
    write it there. Returns an X509 object.

    NOTE: 'key' is a pkey OBJECT, not a file.
    """
    keyfile = conf['ipxe']['ssl_cakey']
    crtfile = conf['ipxe']['ssl_ca']
    if not key:
        if os.path.isfile(keyfile):
            try:
                with open(keyfile) as f:  # 'with' avoids the leaked handle
                    key = OpenSSL.crypto.load_privatekey(
                        OpenSSL.crypto.FILETYPE_PEM,
                        f.read())
            except OpenSSL.crypto.Error:  # was a bare except
                exit('{0}: ERROR: It seems that {1} is not a proper PEM-encoded SSL key.'.format(
                    datetime.datetime.now(),
                    keyfile))
        else:
            exit('{0}: ERROR: We need a key to generate a CA certificate!'.format(
                datetime.datetime.now()))
    if os.path.isfile(crtfile):
        try:
            with open(crtfile) as f:
                ca = OpenSSL.crypto.load_certificate(
                    OpenSSL.crypto.FILETYPE_PEM,
                    f.read())
        except OpenSSL.crypto.Error:  # was a bare except
            exit('{0}: ERROR: It seems that {1} is not a proper PEM-encoded SSL certificate.'.format(
                datetime.datetime.now(),
                crtfile))
    else:
        # raw strings: '\g<2>' in a normal literal is an invalid escape
        # sequence (DeprecationWarning since 3.6).
        domain = (re.sub(r'^(https?|ftp)://([a-z0-9.-]+)/?.*$', r'\g<2>',
                         conf['ipxe']['uri'],
                         flags=re.IGNORECASE)).lower()
        # NOTE(review): 'domain' is computed but unused -- the CN line using
        # it is commented out below. Kept for when that's re-enabled.
        # http://www.pyopenssl.org/en/stable/api/crypto.html#pkey-objects
        # http://docs.ganeti.org/ganeti/2.14/html/design-x509-ca.html
        ca = OpenSSL.crypto.X509()
        ca.set_version(3)
        ca.set_serial_number(1)
        #ca.get_subject().CN = domain
        ca.get_subject().CN = '{0} CA'.format(conf['bdisk']['name'])
        ca.gmtime_adj_notBefore(0)
        # valid for ROUGHLY 10 years. years(ish) * days * hours * mins * secs.
        # the paramater is in seconds, which is why we need to multiply them all together.
        ca.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
        ca.set_issuer(ca.get_subject())
        ca.set_pubkey(key)
        ca.add_extensions([
            OpenSSL.crypto.X509Extension(b"basicConstraints",
                                         True,
                                         b"CA:TRUE, pathlen:0"),
            OpenSSL.crypto.X509Extension(b"keyUsage",
                                         True,
                                         b"keyCertSign, cRLSign"),
            OpenSSL.crypto.X509Extension(b"subjectKeyIdentifier",
                                         False,
                                         b"hash",
                                         subject = ca),])
        ca.sign(key, "sha512")
        with open(crtfile, 'wb') as f:
            f.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, ca))
    return(ca)
|
|
||||||
|
|
||||||
def sslCKey(conf):
    """
    Load the client private key from conf['ipxe']['ssl_key'] if it exists;
    otherwise generate a 4096-bit RSA key and write it there.
    Returns an OpenSSL PKey object.
    """
    keyfile = conf['ipxe']['ssl_key']
    if os.path.isfile(keyfile):
        try:
            with open(keyfile) as f:  # 'with' avoids the leaked handle
                key = OpenSSL.crypto.load_privatekey(
                    OpenSSL.crypto.FILETYPE_PEM,
                    f.read())
        except OpenSSL.crypto.Error:  # was a bare except
            exit('{0}: ERROR: It seems that {1} is not a proper PEM-encoded SSL key.'.format(
                datetime.datetime.now(),
                keyfile))
    else:
        key = OpenSSL.crypto.PKey()
        print("{0}: [SSL] Generating SSL Client key...".format(datetime.datetime.now()))
        key.generate_key(OpenSSL.crypto.TYPE_RSA, 4096)
        with open(keyfile, 'wb') as f:
            f.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key))
    return(key)
|
|
||||||
|
|
||||||
def sslCSR(conf, key = None):
    """
    Generate a certificate signing request for the client cert, CN set to
    the domain parsed from the iPXE URI, and write it to /tmp/main.csr.
    Returns the X509Req object.

    NOTE: 'key' is a pkey OBJECT, not a file.
    """
    keyfile = conf['ipxe']['ssl_key']
    crtfile = conf['ipxe']['ssl_crt']
    if not key:
        if os.path.isfile(keyfile):
            try:
                with open(keyfile) as f:  # 'with' avoids the leaked handle
                    key = OpenSSL.crypto.load_privatekey(
                        OpenSSL.crypto.FILETYPE_PEM,
                        f.read())
            except OpenSSL.crypto.Error:  # was a bare except
                exit('{0}: ERROR: It seems that {1} is not a proper PEM-encoded SSL key.'.format(
                    datetime.datetime.now(),
                    keyfile))
        else:
            exit('{0}: ERROR: We need a key to generate a CSR!'.format(
                datetime.datetime.now()))
    # raw strings: '\g<2>' in a normal literal is an invalid escape sequence.
    domain = (re.sub(r'^(https?|ftp)://([a-z0-9.-]+)/?.*$', r'\g<2>',
                     conf['ipxe']['uri'],
                     flags=re.IGNORECASE)).lower()
    csr = OpenSSL.crypto.X509Req()
    csr.get_subject().CN = domain
    #req.get_subject().countryName = 'xxx'
    #req.get_subject().stateOrProvinceName = 'xxx'
    #req.get_subject().localityName = 'xxx'
    #req.get_subject().organizationName = 'xxx'
    #req.get_subject().organizationalUnitName = 'xxx'
    csr.set_pubkey(key)
    csr.sign(key, "sha512")
    with open('/tmp/main.csr', 'wb') as f:
        f.write(OpenSSL.crypto.dump_certificate_request(OpenSSL.crypto.FILETYPE_PEM, csr))
    return(csr)
|
|
||||||
|
|
||||||
def sslSign(conf, ca, key, csr):
    """
    Sign a CSR with the CA key, producing a client cert valid for 24 hours,
    written to conf['ipxe']['ssl_crt']. Returns the X509 object.

    BUGFIX: the original ignored the 'csr' argument entirely and always
    re-read the hard-coded /tmp/main.csr file. The passed-in X509Req (as
    produced by sslCSR) is now honored; the file is only a fallback.
    """
    if csr is None:
        # Backwards-compatible fallback to the CSR sslCSR() wrote to disk.
        with open("/tmp/main.csr") as f:
            csr = OpenSSL.crypto.load_certificate_request(
                OpenSSL.crypto.FILETYPE_PEM,
                f.read())
    cert = OpenSSL.crypto.X509()
    cert.set_subject(csr.get_subject())
    cert.set_serial_number(1)
    cert.gmtime_adj_notBefore(0)
    # valid for 24 hours (parameter is in seconds).
    cert.gmtime_adj_notAfter(24 * 60 * 60)
    cert.set_issuer(ca.get_subject())
    cert.set_pubkey(csr.get_pubkey())
    #cert.set_pubkey(ca.get_pubkey())
    cert.sign(key, "sha512")
    with open(conf['ipxe']['ssl_crt'], 'wb') as f:
        f.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert))
    return(cert)
|
|
||||||
|
|
||||||
def sslPKI(conf):
    """
    run checks for everything, gen what's missing.

    Ensures the CA key, CA cert, and client key exist, then returns a client
    certificate: the existing one if it loads and verifies against the
    client key, otherwise a freshly signed one.
    """
    ssldir = conf['ipxe']['ssldir']
    os.makedirs(ssldir, exist_ok = True)
    certfile = conf['ipxe']['ssl_crt']
    key = sslCAKey(conf)
    ca = sslCA(conf, key = key)
    ckey = sslCKey(conf)
    cert = None
    if os.path.isfile(certfile):
        with open(certfile) as f:  # 'with' avoids the leaked handle
            cert = OpenSSL.crypto.load_certificate(
                OpenSSL.crypto.FILETYPE_PEM,
                f.read())
    if cert is None or not verifyCert(cert, ckey):
        # The missing-cert and failed-verify branches were identical
        # copy/paste in the original; collapsed into one.
        csr = sslCSR(conf, ckey)
        cert = sslSign(conf, ca, key, csr)
    return(cert)
|
|
156
bdisk/bchroot.py
156
bdisk/bchroot.py
@ -1,156 +0,0 @@
|
|||||||
import os
|
|
||||||
import sys
|
|
||||||
import psutil
|
|
||||||
import subprocess
|
|
||||||
import datetime
|
|
||||||
import tarfile
|
|
||||||
import humanize
|
|
||||||
import shutil
|
|
||||||
|
|
||||||
|
|
||||||
def chroot(chrootdir, chroot_hostname, cmd = '/root/pre-build.sh'):
    """
    Mount the chroot's supporting filesystems (idempotently), run 'cmd'
    inside the chroot, then escape back to the real root. Returns chrootdir.

    NOTE(review): 'chroot_hostname' is accepted but never used -- confirm
    whether the hostname was meant to be set inside the chroot.
    """
    # MOUNT the chroot
    mounts = [m.mountpoint for m in psutil.disk_partitions(all = True)]
    # the order we mount here is VERY IMPORTANT. Sure, we could do "for m in cmounts:", but dicts aren't ordered until python 3.6
    # and this is SO important it's best that we be explicit as possible while we're still in alpha/beta stage. TODO?
    mount_order = ('chroot', 'resolv', 'proc', 'sys', 'efi', 'dev', 'pts', 'shm', 'run', 'tmp')
    cmounts = dict.fromkeys(mount_order)
    # chroot (bind mount... onto itself. it's so stupid, i know. see https://bugs.archlinux.org/task/46169)
    if chrootdir not in mounts:
        cmounts['chroot'] = ['/bin/mount',
                             '--bind',
                             chrootdir,
                             chrootdir]
    # resolv
    if (chrootdir + '/etc/resolv.conf') not in mounts:
        cmounts['resolv'] = ['/bin/mount',
                             '--bind',
                             '-o', 'ro',
                             '/etc/resolv.conf',
                             chrootdir + '/etc/resolv.conf']
    # proc
    if (chrootdir + '/proc') not in mounts:
        cmounts['proc'] = ['/bin/mount',
                           '-t', 'proc',
                           '-o', 'nosuid,noexec,nodev',
                           'proc',
                           chrootdir + '/proc']
    # sys
    if (chrootdir + '/sys') not in mounts:
        cmounts['sys'] = ['/bin/mount',
                          '-t', 'sysfs',
                          '-o', 'nosuid,noexec,nodev,ro',
                          'sys',
                          chrootdir + '/sys']
    # efi (if it exists on the host)
    if '/sys/firmware/efi/efivars' in mounts:
        if (chrootdir + '/sys/firmware/efi/efivars') not in mounts:
            cmounts['efi'] = ['/bin/mount',
                              '-t', 'efivarfs',
                              '-o', 'nosuid,noexec,nodev',
                              'efivarfs',
                              chrootdir + '/sys/firmware/efi/efivars']
    # dev
    if (chrootdir + '/dev') not in mounts:
        cmounts['dev'] = ['/bin/mount',
                          '-t', 'devtmpfs',
                          '-o', 'mode=0755,nosuid',
                          'udev',
                          chrootdir + '/dev']
    # pts
    if (chrootdir + '/dev/pts') not in mounts:
        cmounts['pts'] = ['/bin/mount',
                          '-t', 'devpts',
                          '-o', 'mode=0620,gid=5,nosuid,noexec',
                          'devpts',
                          chrootdir + '/dev/pts']
    # shm (if it exists on the host)
    if '/dev/shm' in mounts:
        if (chrootdir + '/dev/shm') not in mounts:
            cmounts['shm'] = ['/bin/mount',
                              '-t', 'tmpfs',
                              '-o', 'mode=1777,nosuid,nodev',
                              'shm',
                              chrootdir + '/dev/shm']
    # run (if it exists on the host)
    if '/run' in mounts:
        if (chrootdir + '/run') not in mounts:
            cmounts['run'] = ['/bin/mount',
                              '-t', 'tmpfs',
                              '-o', 'nosuid,nodev,mode=0755',
                              'run',
                              chrootdir + '/run']
    # tmp (if it exists on the host)
    if '/tmp' in mounts:
        if (chrootdir + '/tmp') not in mounts:
            cmounts['tmp'] = ['/bin/mount',
                              '-t', 'tmpfs',
                              '-o', 'mode=1777,strictatime,nodev,nosuid',
                              'tmp',
                              chrootdir + '/tmp']
    for m in mount_order:
        if cmounts[m]:
            subprocess.call(cmounts[m])
    print("{0}: [CHROOT] Running '{1}' ({2}). PROGRESS: tail -f {2}/var/log/chroot_install.log ...".format(
        datetime.datetime.now(),
        cmd,
        chrootdir))
    real_root = os.open("/", os.O_RDONLY)
    os.chroot(chrootdir)
    # BUGFIX: the original hard-coded os.system('/root/pre-build.sh') here,
    # silently ignoring the 'cmd' parameter the progress message advertises.
    os.system(cmd)
    os.fchdir(real_root)
    os.chroot('.')
    os.close(real_root)
    if not os.path.isfile('{0}/sbin/init'.format(chrootdir)):
        os.symlink('../lib/systemd/systemd', '{0}/sbin/init'.format(chrootdir))
    return(chrootdir)
|
|
||||||
|
|
||||||
def chrootUnmount(chrootdir):
    """Lazily and recursively unmount everything under 'chrootdir'."""
    umount_cmd = ['umount', '-lR', chrootdir]
    subprocess.call(umount_cmd)
|
|
||||||
|
|
||||||
def chrootTrim(build):
    """
    Shrink each per-arch chroot: archive the pacman/apacman local DBs into
    .tar.xz tarballs under /usr/local, drop the package caches, and delete
    assorted cruft files.
    """
    chrootdir = build['chrootdir']
    for a in build['arch']:
        # Compress the pacman and apacman caches.
        for mgr in ('pacman', 'apacman'):
            shutil.rmtree('{0}/root.{1}/var/cache/{2}'.format(chrootdir, a, mgr))
            os.makedirs('{0}/root.{1}/usr/local/{2}'.format(chrootdir, a, mgr), exist_ok = True)
            tarball = '{0}/root.{1}/usr/local/{2}/{2}.db.tar.xz'.format(chrootdir, a, mgr)
            dbdir = '{0}/root.{1}/var/lib/{2}/local'.format(chrootdir, a, mgr)
            if not os.path.isdir(dbdir):
                continue
            print("{0}: [CHROOT] Compressing {1}'s cache ({2})...".format(
                datetime.datetime.now(),
                chrootdir + '/root.' + a,
                mgr))
            if os.path.isfile(tarball):
                os.remove(tarball)
            with tarfile.open(name = tarball, mode = 'w:xz') as tar:  # if this complains, use x:xz instead
                tar.add(dbdir, arcname = os.path.basename(dbdir))
            shutil.rmtree(dbdir, ignore_errors = True)
            print("{0}: [CHROOT] Created {1} ({2}). {3} cleared.".format(
                datetime.datetime.now(),
                tarball,
                humanize.naturalsize(
                    os.path.getsize(tarball)),
                dbdir))
        #for d in ('etc/pacman.d/gnupg', 'var/empty/.gnupg'): # actually, we should probably keep these.
        # they don't take much space, and it's a PITA to pacman-key --init && pacman-key --populate again on boot.
        #    if os.path.isdir('{0}/root.{1}/{2}'.format(chrootdir, a, d)):
        #        shutil.rmtree('{0}/root.{1}/{2}'.format(chrootdir, a, d))
        # TODO: move the self-cleanup in pre-build.sh to here.
        # ('/root/.gnupg' kept -- see above; chroot_install.log kept for now,
        # maybe always disable if debug is enabled? TODO.)
        for relpath in ('/root/.bash_history',
                        '/.git',
                        '/root/.viminfo'):
            fullpath = '{0}/root.{1}{2}'.format(chrootdir, a, relpath)
            if os.path.isfile(fullpath):
                os.remove(fullpath)
            elif os.path.isdir(fullpath):
                shutil.rmtree(fullpath, ignore_errors = True)
|
|
@ -1,71 +0,0 @@
|
|||||||
#!/bin/env python3
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import host
|
|
||||||
import prep
|
|
||||||
import bchroot
|
|
||||||
import build
|
|
||||||
import datetime
|
|
||||||
import bSSL
|
|
||||||
import ipxe
|
|
||||||
import bsync
|
|
||||||
import bGPG
|
|
||||||
import os
|
|
||||||
|
|
||||||
|
|
||||||
def bdisk(args):
    """
    Top-level build driver: parse config, set up GPG, build and process the
    per-arch chroots, generate/sign images, build the iPXE bits if enabled,
    and run the configured sync targets.
    """
    # we also need to figure out how to implement "mentos" (old bdisk) like functionality, letting us reuse an
    # existing chroot install if possible to save time for future builds.
    # if not, though, it's no big deal.
    if os.getuid() != 0:
        exit('{0}: ERROR: BDisk *must* be run as the root user or with sudo!'.format(datetime.datetime.now()))
    print('{0}: Starting.'.format(datetime.datetime.now()))
    conf = host.parseConfig(host.getConfig(conf_file = args['buildini']))[1]
    prep.dirChk(conf)
    conf['gpgobj'] = bGPG.genGPG(conf)
    prep.buildChroot(conf, keep = False)
    prep.prepChroot(conf)
    bGPG.killStaleAgent(conf)
    for a in conf['build']['arch']:
        rootdir = conf['build']['chrootdir'] + '/root.' + a
        bchroot.chroot(rootdir, 'bdisk.square-r00t.net')
        bchroot.chrootUnmount(rootdir)
    prep.postChroot(conf)
    bchroot.chrootTrim(conf['build'])
    build.genImg(conf)
    build.genUEFI(conf['build'], conf['bdisk'])
    fulliso = build.genISO(conf)
    bGPG.signIMG(fulliso['Main']['file'], conf)
    build.displayStats(fulliso)
    if conf['build']['ipxe']:
        bSSL.sslPKI(conf)
        ipxe.buildIPXE(conf)
        iso = ipxe.genISO(conf)
        if iso:
            for x in iso.keys():
                if x != 'name':
                    bGPG.signIMG(iso[x]['file'], conf)
            build.displayStats(iso)
    bsync.http(conf)
    bsync.tftp(conf)
    bsync.git(conf)
    bsync.rsync(conf)
    print('{0}: Finish.'.format(datetime.datetime.now()))
|
|
||||||
|
|
||||||
def parseArgs():
    """Build and return the ArgumentParser for the BDisk CLI."""
    parser = argparse.ArgumentParser(
        description = 'BDisk - a tool for building live/rescue media.',
        epilog = 'brent s. || 2017 || https://bdisk.square-r00t.net')
    parser.add_argument(
        'buildini',
        metavar = '/path/to/build.ini',
        default = '/etc/bdisk/build.ini',
        nargs = '?',
        help = 'The full/absolute path to the build.ini to use for this run. The default is /etc/bdisk/build.ini, but see https://bdisk.square-r00t.net/#the_code_build_ini_code_file.')
    return(parser)
|
|
||||||
|
|
||||||
def main():
    """CLI entry point: parse arguments into a dict and run the build."""
    parsed = vars(parseArgs().parse_args())
    bdisk(parsed)


if __name__ == '__main__':
    main()
|
|
187
bdisk/bsync.py
187
bdisk/bsync.py
@ -1,187 +0,0 @@
|
|||||||
import shutil
|
|
||||||
import os
|
|
||||||
import pwd
|
|
||||||
import grp
|
|
||||||
import datetime
|
|
||||||
import git
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
|
|
||||||
def http(conf):
    """
    Sync boot/verification files into the HTTP serve directory and chown
    everything to the configured web user/group. No-op unless sync:http.
    """
    http = conf['http']
    build = conf['build']
    prepdir = build['prepdir']
    arch = build['arch']
    bdisk = conf['bdisk']
    if not conf['sync']['http']:
        return
    uid = pwd.getpwnam(http['user'])[2]
    gid = grp.getgrnam(http['group'])[2]
    httpdir = http['path']
    archboot = build['archboot']
    # remove the destination if it exists
    if os.path.isdir(httpdir):
        print('{0}: [HTTP] Removing {1}...'.format(
            datetime.datetime.now(),
            httpdir))
        shutil.rmtree(httpdir)
    # just to make it again. we do this to avoid file existing conflicts.
    os.makedirs(httpdir)
    # here we build a dict of files to copy and their destination paths.
    httpfiles = {}
    print('{0}: [HTTP] (Boot files) => {1}...'.format(
        datetime.datetime.now(),
        httpdir))
    for a in arch:
        for ext in ('md5', 'sfs', 'sha256', 'sha512'):
            rel = '{0}/{1}/airootfs.{2}'.format(bdisk['name'], a, ext)
            httpfiles[rel] = rel
    httpfiles['VERSION_INFO.txt'] = 'VERSION_INFO.txt'
    if 'x86_64' in arch:
        httpfiles['boot/{0}.64.kern'.format(bdisk['uxname'])] = '{0}.64.kern'.format(bdisk['uxname'])
        httpfiles['boot/{0}.64.img'.format(bdisk['uxname'])] = '{0}.64.img'.format(bdisk['uxname'])
    if 'i686' in arch:
        httpfiles['boot/{0}.32.kern'.format(bdisk['uxname'])] = '{0}.32.kern'.format(bdisk['uxname'])
        httpfiles['boot/{0}.32.img'.format(bdisk['uxname'])] = '{0}.32.img'.format(bdisk['uxname'])
    httpfiles['{0}.png'.format(bdisk['uxname'])] = '{0}.png'.format(bdisk['uxname'])
    # and now the magic.
    for src, dest in httpfiles.items():
        fulldest = '{0}/{1}'.format(httpdir, dest)
        os.makedirs(os.path.split(fulldest)[0], exist_ok = True)
        if os.path.lexists('{0}/{1}'.format(prepdir, src)):
            shutil.copy2('{0}/{1}'.format(prepdir, src), '{0}/{1}'.format(httpdir, dest))
    # hand ownership of the whole tree to the web user/group.
    for root, dirs, files in os.walk(httpdir):
        for name in dirs + files:
            os.chown(os.path.join(root, name), uid, gid)
|
|
||||||
|
|
||||||
def tftp(conf):
    """
    Sync boot files into the TFTP serve directory and chown everything to
    the configured user/group. No-op unless sync:tftp.
    """
    # TODO: pxelinux cfg
    tftp = conf['tftp']
    build = conf['build']
    prepdir = build['prepdir']
    arch = build['arch']
    bdisk = conf['bdisk']
    if not conf['sync']['tftp']:
        return
    uid = pwd.getpwnam(tftp['user'])[2]
    gid = grp.getgrnam(tftp['group'])[2]
    tftpdir = tftp['path']
    # remove the destination if it exists
    if os.path.isdir(tftpdir):
        print('{0}: [TFTP] Removing {1}...'.format(
            datetime.datetime.now(),
            tftpdir))
        shutil.rmtree(tftpdir)
    # and we make it again
    os.makedirs(tftpdir)
    # and make a dict of the files etc.
    tftpfiles = {}
    print('{0}: [TFTP] (Boot files) => {1}...'.format(
        datetime.datetime.now(),
        tftpdir))
    for a in arch:
        for ext in ('md5', 'sfs', 'sha256', 'sha512'):
            rel = '{0}/{1}/airootfs.{2}'.format(bdisk['name'], a, ext)
            tftpfiles[rel] = rel
    tftpfiles['VERSION_INFO.txt'] = 'VERSION_INFO.txt'
    # NOTE(review): unlike http(), the x86_64 destinations here drop the
    # '.64' infix ('{0}.kern'/'{0}.img') -- preserved; confirm intentional.
    if 'x86_64' in arch:
        tftpfiles['boot/{0}.64.kern'.format(bdisk['uxname'])] = '{0}.kern'.format(bdisk['uxname'])
        tftpfiles['boot/{0}.64.img'.format(bdisk['uxname'])] = '{0}.img'.format(bdisk['uxname'])
    if 'i686' in arch:
        tftpfiles['boot/{0}.32.kern'.format(bdisk['uxname'])] = '{0}.32.kern'.format(bdisk['uxname'])
        tftpfiles['boot/{0}.32.img'.format(bdisk['uxname'])] = '{0}.32.img'.format(bdisk['uxname'])
    tftpfiles['{0}.png'.format(bdisk['uxname'])] = '{0}.png'.format(bdisk['uxname'])
    # and now the magic.
    for src, dest in tftpfiles.items():
        fulldest = '{0}/{1}'.format(tftpdir, dest)
        os.makedirs(os.path.split(fulldest)[0], exist_ok = True)
        shutil.copy2('{0}/{1}'.format(prepdir, src), '{0}/{1}'.format(tftpdir, dest))
    # hand ownership of the whole tree to the tftp user/group.
    for root, dirs, files in os.walk(tftpdir):
        for name in dirs + files:
            os.chown(os.path.join(root, name), uid, gid)
|
|
||||||
|
|
||||||
def git(conf):
    """Commit all local changes and push them to the configured git remote.

    Runs only when sync:git is enabled in the parsed config dict *conf*.
    Performs I/O only (git add/commit/push); returns None.
    """
    # NOTE: this function's name shadows the module-level "import git"
    # (GitPython), so a bare git.Repo(...) here would resolve to *this
    # function* and raise AttributeError. Import the module locally so the
    # name "git" refers to GitPython inside this body.
    import git
    build = conf['build']
    # TODO(review): bdisk:dev / bdisk:email were read here but never used;
    # consider configuring the commit author from them.
    if conf['sync']['git']:
        print('{0}: [GIT] Creating commit...'.format(datetime.datetime.now()))
        repo = git.Repo(build['basedir'])
        repo.git.add('--all')
        repo.index.commit("automated commit from BDisk (git:sync)")
        print('{0}: [GIT] Pushing to remote...'.format(datetime.datetime.now()))
        repo.remotes.origin.push()
|
|
||||||
|
|
||||||
|
|
||||||
def rsync(conf):
    """rsync build deliverables (boot files, ISOs, package lists) to the remote.

    Reads connection details from the [rsync] section and no-ops unless
    sync:rsync is enabled. Performs I/O only; returns None.
    """
    # TODO: just copy tftpbooting pxelinux.cfg (to be generated) if tftp,
    # and do nothing if http- copying over three copies of the squashed filesystems
    # is a waste of time, bandwidth, and disk space on target.
    build = conf['build']
    prepdir = build['prepdir']
    isodir = build['isodir']
    arch = build['arch']
    rsync = conf['rsync']
    sync = conf['sync']
    server = rsync['host']
    path = rsync['path']
    user = rsync['user']
    locpath = False  # placeholder; cmd[4] is rewritten before every call below
    if not sync['rsync']:
        return
    # TODO: some sort of debugging/logging
    cmd = ['/usr/bin/rsync',
           '-a',
           '-q',
           '-z',
           locpath,
           '{0}@{1}:{2}/.'.format(user, server, path)]
    #if sync['http']: # TODO: rsync:http to enable this
    #    cmd[4] = conf['http']['path']
    #    print('{0}: Syncing {1} to {2}. Please wait...'.format(
    #        datetime.datetime.now(), cmd[4], server))
    #    subprocess.call(cmd)
    #if sync['tftp']:
    #    cmd[4] = conf['tftp']['path']
    #    print('{0}: Syncing {1} to {2}. Please wait...'.format(
    #        datetime.datetime.now(), cmd[4], server))
    #    subprocess.call(cmd)
    # FIX: this previously tested conf['ipxe'], which is the whole [ipxe]
    # config *section* (a non-empty dict, hence always truthy). The parsed
    # boolean lives at build['ipxe'] (see parseConfig()).
    if build['ipxe']:
        cmd[4] = build['archboot']
        print('{0}: [RSYNC] {1} => {2}...'.format(
            datetime.datetime.now(),
            cmd[4],
            server))
        subprocess.call(cmd)
        cmd[4] = '{0}/boot'.format(build['prepdir'])
        subprocess.call(cmd)
    if conf['rsync']['iso']:
        cmd[4] = isodir
        print('{0}: [RSYNC] {1} => {2}...'.format(
            datetime.datetime.now(),
            cmd[4],
            server))
        subprocess.call(cmd)
    # Now we copy some extra files.
    prebuild_dir = '{0}/extra/pre-build.d'.format(build['basedir'])
    rsync_files = ['{0}/VERSION_INFO.txt'.format(prepdir),
                   '{0}/root/packages.both'.format(prebuild_dir),
                   '{0}/root/iso.pkgs.both'.format(prebuild_dir)]
    for x in rsync_files:
        cmd[4] = x
        subprocess.call(cmd)
    # And we grab the remaining, since we need to rename them.
    for a in arch:
        cmd[4] = '{0}/{1}/root/packages.arch'.format(prebuild_dir, a)
        cmd[5] = '{0}@{1}:{2}/packages.{3}'.format(user, server, path, a)
        subprocess.call(cmd)
        cmd[4] = '{0}/{1}/root/iso.pkgs.arch'.format(prebuild_dir, a)
        cmd[5] = '{0}@{1}:{2}/iso.pkgs.{3}'.format(user, server, path, a)
        subprocess.call(cmd)
|
|
416
bdisk/build.py
416
bdisk/build.py
@ -1,416 +0,0 @@
|
|||||||
import os
|
|
||||||
import tarfile
|
|
||||||
import shutil
|
|
||||||
import glob
|
|
||||||
import subprocess
|
|
||||||
import hashlib
|
|
||||||
import psutil
|
|
||||||
import jinja2
|
|
||||||
import humanize
|
|
||||||
import datetime
|
|
||||||
import bGPG # bdisk.bGPG
|
|
||||||
from urllib.request import urlopen
|
|
||||||
|
|
||||||
|
|
||||||
def genImg(conf):
    """Squash each arch chroot into airootfs.sfs, checksum it, stage boot
    files (kernel/initrd/logo) into prepdir, and GPG-sign the images.

    Returns None; all results land on disk under build:archboot and
    build:prepdir.
    """
    bdisk = conf['bdisk']
    build = conf['build']
    arch = build['arch']
    chrootdir = build['chrootdir']
    archboot = build['archboot']
    basedir = build['basedir']
    prepdir = build['prepdir']
    hashes = {'sha512': {}, 'sha256': {}, 'md5': {}}
    squashfses = []
    # Staging dir for kernels/initrds must exist before the per-arch copies.
    os.makedirs(prepdir + '/boot', exist_ok = True)
    for a in arch:
        if a == 'i686':
            bitness = '32'
        elif a == 'x86_64':
            bitness = '64'
        # Create the squashfs image
        airoot = archboot + '/' + a + '/'
        squashimg = airoot + 'airootfs.sfs'
        os.makedirs(airoot, exist_ok = True)
        print("{0}: [BUILD] Squashing filesystem ({1})...".format(
            datetime.datetime.now(),
            chrootdir + '/root.' + a))
        # TODO: use stdout and -progress if debugging is enabled; for now all
        # mksquashfs output is discarded. subprocess.DEVNULL avoids leaking
        # an open fd to os.devnull (the old open(os.devnull) was never closed).
        cmd = ['/usr/bin/mksquashfs',
               chrootdir + '/root.' + a,
               squashimg,
               '-no-progress',
               '-noappend',
               '-comp', 'xz']
        subprocess.call(cmd, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT)
        print("{0}: [BUILD] Generated {1} ({2}).".format(
            datetime.datetime.now(),
            squashimg,
            humanize.naturalsize(
                os.path.getsize(squashimg))))
        # Generate the checksum files
        print("{0}: [BUILD] Generating SHA512 SHA256, MD5 checksums ({1})...".format(
            datetime.datetime.now(),
            squashimg))
        hashes['sha512'][a] = hashlib.sha512()
        hashes['sha256'][a] = hashlib.sha256()
        hashes['md5'][a] = hashlib.md5()
        with open(squashimg, 'rb') as f:
            while True:
                stream = f.read(65536)  # 64kb chunks
                if not stream:
                    break
                # NOTE: these items are hashlib objects, NOT strings!
                hashes['sha512'][a].update(stream)
                hashes['sha256'][a].update(stream)
                hashes['md5'][a].update(stream)
        for alg in ('sha512', 'sha256', 'md5'):
            with open(airoot + 'airootfs.' + alg, 'w+') as f:
                f.write("{0} airootfs.sfs\n".format(hashes[alg][a].hexdigest()))
        squashfses.append('{0}'.format(squashimg))
        # Kernels, initrds...
        # We use a dict here so we can use the right filenames...
        # FIX: staged per-arch *inside* the loop; previously this ran after the
        # loop with the leftover a/bitness, so multiarch builds only copied the
        # last arch's kernel/initrd.
        bootfiles = {}
        bootfiles['kernel'] = ['vmlinuz-linux', '{0}.{1}.kern'.format(bdisk['uxname'], bitness)]
        bootfiles['initrd'] = ['initramfs-linux.img', '{0}.{1}.img'.format(bdisk['uxname'], bitness)]
        for x in ('kernel', 'initrd'):
            shutil.copy2('{0}/root.{1}/boot/{2}'.format(chrootdir, a, bootfiles[x][0]),
                         '{0}/boot/{1}'.format(prepdir, bootfiles[x][1]))
    print("{0}: [BUILD] Hash checksums complete.".format(datetime.datetime.now()))
    # Logo: use the project-supplied one if present, else the stock bdisk.png.
    if not os.path.isfile('{0}/extra/{1}.png'.format(basedir, bdisk['uxname'])):
        shutil.copy2(basedir + '/extra/bdisk.png', '{0}/{1}.png'.format(prepdir, bdisk['uxname']))
    else:
        shutil.copy2(basedir + '/extra/{0}.png'.format(bdisk['uxname']), '{0}/{1}.png'.format(prepdir, bdisk['uxname']))
    # Sign every squashed filesystem we produced.
    for i in squashfses:
        bGPG.signIMG(i, conf)
|
|
||||||
|
|
||||||
|
|
||||||
def _render_loader_confs(env, build, bdisk, basepath, efi = None):
    """Render the systemd-boot loader config templates into *basepath*.

    Writes loader.conf into <basepath>/loader/ and the boot entries into
    <basepath>/loader/entries/. If *efi* is not None it is passed through to
    the templates as the "efi" variable (used for the embedded ESP copy).

    This replaces two copy-pasted loops that had drifted apart: the second
    copy used "elif t in ('uefi1', 'uefi2')", inverting the filename mapping
    for ram/uefi1/uefi2. The mapping below matches the first (prepdir) copy.
    """
    os.makedirs(basepath + '/loader/entries', exist_ok = True)
    for t in ('loader', 'ram', 'base', 'uefi2', 'uefi1'):
        if t == 'base':
            fname = bdisk['uxname'] + '.conf'
        elif t not in ('uefi1', 'uefi2'):
            fname = t + '.conf'
        else:
            fname = bdisk['uxname'] + '_' + t + '.conf'
        if t == 'loader':
            tplpath = basepath + '/loader/'
            fname = 'loader.conf'  # we change the var from above because it's an oddball.
        else:
            tplpath = basepath + '/loader/entries/'
        tpl = env.get_template('EFI/' + t + '.conf.j2')
        if efi is None:
            tpl_out = tpl.render(build = build, bdisk = bdisk)
        else:
            tpl_out = tpl.render(build = build, bdisk = bdisk, efi = efi)
        with open(tplpath + fname, "w+") as f:
            f.write(tpl_out)

def genUEFI(build, bdisk):
    """Build UEFI support: fetch EFI shells, stage efitools/systemd-boot
    binaries and loader configs, and create the FAT ESP image (efiboot.img).

    Returns the path to the ESP image, or None when no x86_64 build was
    requested (32-bit EFI implementations are nigh nonexistant, and there's
    always multiarch).
    """
    arch = build['arch']
    if 'x86_64' not in arch:
        return(None)
    prepdir = build['prepdir']
    basedir = build['basedir']
    chrootdir = build['chrootdir']
    mountpt = build['mountpt']
    templates_dir = build['basedir'] + '/extra/templates'
    efidir = '{0}/EFI/{1}'.format(prepdir, bdisk['name'])
    os.makedirs(efidir, exist_ok = True)
    efiboot_img = efidir + '/efiboot.img'
    os.makedirs(prepdir + '/EFI/boot', exist_ok = True)
    ## Download the EFI shells if we don't have them.
    # For UEFI 2.3+ (http://sourceforge.net/apps/mediawiki/tianocore/index.php?title=UEFI_Shell)
    if not os.path.isfile(prepdir + '/EFI/shellx64_v2.efi'):
        shell2_path = prepdir + '/EFI/shellx64_v2.efi'
        print("{0}: [BUILD] Warning: You are missing {1}. Fetching...".format(datetime.datetime.now(), shell2_path))
        shell2_url = 'https://raw.githubusercontent.com/tianocore/edk2/master/ShellBinPkg/UefiShell/X64/Shell.efi'
        shell2_fetch = urlopen(shell2_url)
        with open(shell2_path, 'wb+') as dl:
            dl.write(shell2_fetch.read())
        shell2_fetch.close()
    # Shell for older versions (http://sourceforge.net/apps/mediawiki/tianocore/index.php?title=Efi-shell)
    # TODO: is there an Arch package for this? can we just install that in the chroot and copy the shell binaries?
    if not os.path.isfile(prepdir + '/EFI/shellx64_v1.efi'):
        shell1_path = prepdir + '/EFI/shellx64_v1.efi'
        print("{0}: [BUILD] Warning: You are missing {1}. Fetching...".format(datetime.datetime.now(), shell1_path))
        shell1_url = 'https://raw.githubusercontent.com/tianocore/edk2/master/EdkShellBinPkg/FullShell/X64/Shell_Full.efi'
        shell1_fetch = urlopen(shell1_url)
        with open(shell1_path, 'wb+') as dl:
            dl.write(shell1_fetch.read())
        shell1_fetch.close()
    print("{0}: [BUILD] Building UEFI support...".format(datetime.datetime.now()))
    ## But wait! That's not all! We need more binaries.
    # Looks like these are in the "efitools" package now.
    for f in ('PreLoader.efi', 'HashTool.efi'):
        if f == 'PreLoader.efi':
            fname = 'bootx64.efi'
        else:
            fname = f
        with open('{0}/root.x86_64/usr/share/efitools/efi/{1}'.format(chrootdir, f), 'rb') as r:
            with open('{0}/EFI/boot/{1}'.format(prepdir, fname), 'wb') as file:
                file.write(r.read())
    # And we also need the systemd efi bootloader.
    if os.path.isfile(prepdir + '/EFI/boot/loader.efi'):
        os.remove(prepdir + '/EFI/boot/loader.efi')
    with open('{0}/root.x86_64/usr/lib/systemd/boot/efi/systemd-bootx64.efi'.format(chrootdir), 'rb') as r:
        with open('{0}/EFI/boot/loader.efi'.format(prepdir), 'wb') as file:
            file.write(r.read())
    # And the accompanying configs for the systemd efi bootloader, too.
    tpl_loader = jinja2.FileSystemLoader(templates_dir)
    env = jinja2.Environment(loader = tpl_loader)
    _render_loader_confs(env, build, bdisk, prepdir)
    # And we need to get filesizes (in bytes) for everything we need to include in the ESP.
    # This is more important than it looks.
    sizetotal = 33553920  # The spec'd EFI binary size (32MB). It's okay to go over this though (and we do)
    # because xorriso sees it as a filesystem image and adjusts the ISO automagically.
    sizefiles = ['/boot/' + bdisk['uxname'] + '.64.img',
                 '/boot/' + bdisk['uxname'] + '.64.kern',
                 '/EFI/boot/bootx64.efi',
                 '/EFI/boot/loader.efi',
                 '/EFI/boot/HashTool.efi',
                 '/EFI/shellx64_v1.efi',
                 '/EFI/shellx64_v2.efi']
    for i in sizefiles:
        sizetotal += os.path.getsize(prepdir + i)
    # Loader configs
    for (path, dirs, files) in os.walk(prepdir + '/loader/'):
        for file in files:
            sizetotal += os.path.getsize(os.path.join(path, file))
    # And now we create the EFI binary filesystem image/binary...
    print("{0}: [BUILD] Creating EFI ESP image {2} ({1})...".format(
        datetime.datetime.now(),
        humanize.naturalsize(sizetotal),
        efiboot_img))
    if os.path.isfile(efiboot_img):
        os.remove(efiboot_img)
    with open(efiboot_img, 'wb+') as f:
        f.truncate(sizetotal)
    # subprocess.DEVNULL instead of a leaked open(os.devnull) fd.
    cmd = ['/sbin/mkfs.fat', '-F', '32', '-n', bdisk['name'] + '_EFI', efiboot_img]
    subprocess.call(cmd, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT)
    cmd = ['/bin/mount', efiboot_img, build['mountpt']]
    subprocess.call(cmd)
    os.makedirs('{0}/EFI/{1}'.format(build['mountpt'], bdisk['name']))
    os.makedirs(build['mountpt'] + '/EFI/boot')
    # Ready for some deja vu? The ESP embeds its own copy of the loader
    # configs for the hybrid ISO, rendered with efi = 'yes'.
    _render_loader_confs(env, build, bdisk, build['mountpt'], efi = 'yes')
    for x in ('bootx64.efi', 'HashTool.efi', 'loader.efi'):
        y = prepdir + '/EFI/boot/' + x
        z = mountpt + '/EFI/boot/' + x
        if os.path.isfile(z):
            os.remove(z)
        shutil.copy(y, z)
    for x in ('shellx64_v1.efi', 'shellx64_v2.efi'):
        y = prepdir + '/EFI/' + x
        z = mountpt + '/EFI/' + x
        if os.path.isfile(z):
            os.remove(z)
        shutil.copy(y, z)
    shutil.copy2('{0}/root.{1}/boot/vmlinuz-linux'.format(chrootdir, 'x86_64'),
                 '{0}/EFI/{1}/{2}.efi'.format(mountpt, bdisk['name'], bdisk['uxname']))
    shutil.copy2('{0}/root.{1}/boot/initramfs-linux.img'.format(chrootdir, 'x86_64'),
                 '{0}/EFI/{1}/{2}.img'.format(mountpt, bdisk['name'], bdisk['uxname']))
    # TODO: support both arch's as EFI bootable instead? Maybe? requires more research. very rare.
    cmd = ['/bin/umount', mountpt]
    subprocess.call(cmd)
    print('{0}: [BUILD] Built EFI binary.'.format(datetime.datetime.now()))
    return(efiboot_img)
|
|
||||||
|
|
||||||
def genISO(conf):
    """Assemble the full hybrid ISO with xorriso and return its metadata.

    Returns a dict shaped like {'name': ['Main'], 'Main': {'sha': <hex>,
    'file': <path>, 'size': <human str>, 'type': 'Full', 'fmt': 'Hybrid ISO'}}
    suitable for displayStats().
    """
    build = conf['build']
    bdisk = conf['bdisk']
    archboot = build['archboot']
    prepdir = build['prepdir']
    templates_dir = build['basedir'] + '/extra/templates'
    arch = build['arch']
    builddir = prepdir + '/' + bdisk['name']
    # arch[0] is safe to use, even if multiarch, because the only cases when it'd be ambiguous
    # is when x86_64 is specifically set to [0]. See host.py's parseConfig().
    # TODO: can we use syslinux for EFI too instead of prebootloader?
    syslinuxdir = build['chrootdir'] + '/root.' + arch[0] + '/usr/lib/syslinux/bios/'
    sysl_tmp = prepdir + '/isolinux/'
    if len(arch) == 1:
        isofile = '{0}-{1}-{2}-{3}.iso'.format(bdisk['uxname'], bdisk['ver'], build['buildnum'], arch[0])
    else:
        isofile = '{0}-{1}-{2}.iso'.format(bdisk['uxname'], bdisk['ver'], build['buildnum'])
    isopath = build['isodir'] + '/' + isofile
    # In case we're building a single-arch ISO...
    if len(arch) == 1:
        isolinux_cfg = '/BIOS/isolinux.cfg.arch.j2'
        if arch[0] == 'i686':
            bitness = '32'
            efi = False
        elif arch[0] == 'x86_64':
            bitness = '64'
            efi = True
    else:
        isolinux_cfg = '/BIOS/isolinux.cfg.multi.j2'
        bitness = False
        efi = True
    if os.path.isfile(isopath):
        os.remove(isopath)
    if archboot != prepdir + '/' + bdisk['name']:  # best to use static concat here...
        if os.path.isdir(builddir):
            shutil.rmtree(builddir, ignore_errors = True)
        shutil.copytree(archboot, builddir)
    if build['ipxe']:
        # NOTE(review): minipath is computed but not used here; presumably
        # consumed by the iPXE build elsewhere — confirm before removing.
        ipxe = conf['ipxe']
        if ipxe['iso']:
            minifile = '{0}-{1}-mini.iso'.format(bdisk['uxname'], bdisk['ver'])
            minipath = build['isodir'] + '/' + minifile
        if ipxe['usb']:
            usbfile = '{0}-{1}-mini.usb.img'.format(bdisk['uxname'], bdisk['ver'])
            minipath = build['isodir'] + '/' + usbfile
    # Copy isolinux files
    print("{0}: [BUILD] Staging ISO preparation...".format(datetime.datetime.now()))
    isolinux_files = ['isolinux.bin',
                      'vesamenu.c32',
                      'linux.c32',
                      'reboot.c32']
    # TODO: implement debugging mode in bdisk
    #if debug:
    #    isolinux_files[0] = 'isolinux-debug.bin'
    ifisolinux_files = ['ldlinux.c32',
                        'libcom32.c32',
                        'libutil.c32',
                        'ifcpu64.c32']
    os.makedirs(sysl_tmp, exist_ok = True)
    # Both lists get identical treatment; one loop instead of two copies.
    for f in isolinux_files + ifisolinux_files:
        if os.path.isfile(sysl_tmp + f):
            os.remove(sysl_tmp + f)
        shutil.copy2(syslinuxdir + f, sysl_tmp + f)
    tpl_loader = jinja2.FileSystemLoader(templates_dir)
    env = jinja2.Environment(loader = tpl_loader)
    tpl = env.get_template(isolinux_cfg)
    tpl_out = tpl.render(build = build, bdisk = bdisk, bitness = bitness)
    with open(sysl_tmp + '/isolinux.cfg', "w+") as f:
        f.write(tpl_out)
    # And we need to build the ISO!
    # TODO: only include UEFI support if we actually built it!
    print("{0}: [BUILD] Building full ISO ({1})...".format(datetime.datetime.now(), isopath))
    # The BIOS and EFI invocations share everything except the alternate
    # El Torito boot entry, so build one list and splice the EFI args in.
    cmd = ['/usr/bin/xorriso',
           '-as', 'mkisofs',
           '-iso-level', '3',
           '-full-iso9660-filenames',
           '-volid', bdisk['name'],
           '-appid', bdisk['desc'],
           '-publisher', bdisk['dev'],
           '-preparer', 'prepared by ' + bdisk['dev'],
           '-eltorito-boot', 'isolinux/isolinux.bin',
           '-eltorito-catalog', 'isolinux/boot.cat',
           '-no-emul-boot',
           '-boot-load-size', '4',
           '-boot-info-table',
           '-isohybrid-mbr', syslinuxdir + 'isohdpfx.bin']
    if efi:
        cmd += ['-eltorito-alt-boot',
                '-e', 'EFI/' + bdisk['name'] + '/efiboot.img']
    cmd += ['-no-emul-boot',
            '-isohybrid-gpt-basdat',
            '-output', isopath,
            prepdir]
    # subprocess.DEVNULL instead of a leaked open(os.devnull) fd.
    subprocess.call(cmd, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT)
    # Get size and SHA256 of the resulting ISO.
    iso = {}
    iso['name'] = ['Main']
    iso['Main'] = {}
    iso['Main']['sha'] = hashlib.sha256()
    with open(isopath, 'rb') as f:
        while True:
            stream = f.read(65536)  # 64kb chunks
            if not stream:
                break
            iso['Main']['sha'].update(stream)
    iso['Main']['sha'] = iso['Main']['sha'].hexdigest()
    iso['Main']['file'] = isopath
    iso['Main']['size'] = humanize.naturalsize(os.path.getsize(isopath))
    iso['Main']['type'] = 'Full'
    iso['Main']['fmt'] = 'Hybrid ISO'
    return(iso)
|
|
||||||
|
|
||||||
def displayStats(iso):
    """Print a timestamped summary (size, SHA256, location) for each image.

    *iso* is the dict returned by genISO(): iso['name'] lists the entry keys,
    and each entry carries 'type', 'fmt', 'size', 'sha' and 'file'.
    """
    for key in iso['name']:
        entry = iso[key]
        header = "{0}: == {1} {2} ==".format(datetime.datetime.now(), entry['type'], entry['fmt'])
        print(header)
        for label, field in (('Size', 'size'), ('SHA256', 'sha'), ('Location', 'file')):
            print('\t\t\t = {0}: {1}'.format(label, entry[field]))
|
|
||||||
|
|
||||||
def cleanUp():
    """Post-build cleanup hook; currently a no-op.

    TODO: clear out all of prepdir?
    """
    pass
|
|
0
bdisk/confparse.py
Normal file
0
bdisk/confparse.py
Normal file
0
bdisk/env_prep.py
Normal file
0
bdisk/env_prep.py
Normal file
0
bdisk/git.py
Normal file
0
bdisk/git.py
Normal file
194
bdisk/host.py
194
bdisk/host.py
@ -1,194 +0,0 @@
|
|||||||
import os
|
|
||||||
import sys
|
|
||||||
import platform
|
|
||||||
import re
|
|
||||||
import glob
|
|
||||||
import configparser
|
|
||||||
import validators
|
|
||||||
import git
|
|
||||||
import datetime
|
|
||||||
from socket import getaddrinfo
|
|
||||||
|
|
||||||
def getOS():
    """Return the lowercased distro name of the build host.

    Historically one of: SuSE, debian, fedora, redhat, centos, mandrake,
    mandriva, rocks, slackware, yellowdog, gentoo, UnitedLinux, turbolinux,
    arch, mageia. Returns '' when the distro cannot be determined.
    """
    try:
        # Works on Python < 3.8 only.
        distro = platform.linux_distribution()[0].lower()
    except AttributeError:
        # FIX: platform.linux_distribution() was removed in Python 3.8;
        # fall back to the ID field of /etc/os-release.
        distro = ''
        try:
            with open('/etc/os-release', 'r') as f:
                for line in f:
                    if line.startswith('ID='):
                        distro = line.split('=', 1)[1].strip().strip('"').lower()
                        break
        except OSError:
            pass
    return(distro)
|
|
||||||
|
|
||||||
def getBits():
    """Return the interpreter's bitness string, e.g. '64bit' or '32bit'."""
    return(platform.architecture()[0])
|
|
||||||
|
|
||||||
def getHostname():
    """Return the network hostname of the build host."""
    return(platform.node())
|
|
||||||
|
|
||||||
def getConfig(conf_file = '/etc/bdisk/build.ini'):
    """Locate the build.ini configuration files.

    Returns a two-element list [defconf, conf]: the distributed/default ini
    (always a path) and the user's ini (a path, or False if none was found).
    Search order honors a caller-supplied *conf_file* first, then a list of
    well-known install locations.
    """
    conf = False
    # define some default conf paths in case we're installed by
    # a package manager. in order of the paths we should search.
    currentdir = os.path.dirname(os.path.realpath(__file__))
    currentdir_user = os.path.abspath('{0}/../build.ini'.format(currentdir))
    currentdir_def = os.path.abspath('{0}/../extra/dist.build.ini'.format(currentdir))
    default_conf_paths = ['/etc/bdisk/build.ini',
                          '/usr/share/bdisk/build.ini',
                          '/usr/share/bdisk/extra/build.ini',
                          '/usr/share/docs/bdisk/build.ini',  # this is the preferred installation path for packagers
                          '/usr/local/etc/bdisk/build.ini',
                          '/usr/local/share/docs/bdisk/build.ini',
                          '/opt/dev/bdisk/build.ini',
                          '/opt/dev/bdisk/extra/build.ini',
                          '/opt/dev/bdisk/extra/dist.build.ini',
                          currentdir_user]
    # if we weren't given one/using the default...
    if conf_file == '/etc/bdisk/build.ini':
        if not os.path.isfile(conf_file):
            for p in default_conf_paths:
                if os.path.isfile(p):
                    conf = p
                    break
        else:
            conf = conf_file
    else:
        conf = conf_file
    # FIX: currentdir_def was computed above and then ignored while the very
    # same path was recomputed here; reuse it.
    defconf = currentdir_def
    if not conf:
        # okay, so let's check for distributed/"blank" ini's
        # since we can't seem to find one.
        dist_conf_paths = [re.sub('(build\.ini)', 'dist.\\1', s) for s in default_conf_paths]
        for q in dist_conf_paths:
            if os.path.isfile(q):
                conf = q
                break
    if os.path.isfile(default_conf_paths[4]):
        defconf = default_conf_paths[4]
    confs = [defconf, conf]
    return(confs)
|
|
||||||
|
|
||||||
def parseConfig(confs):
    """Parse and validate the BDisk build.ini config(s).

    *confs* is the [defconf, conf] list from getConfig(). Returns a tuple
    (config, config_dict): the raw configparser object and a plain-dict copy
    with booleans, version, buildnum and arch list resolved. Exits the
    process with a descriptive message on invalid configuration.
    """
    config = configparser.ConfigParser()
    config._interpolation = configparser.ExtendedInterpolation()
    config.read(confs)
    # a dict makes this so much easier.
    config_dict = {s:dict(config.items(s)) for s in config.sections()}
    # Convert the booleans to pythonic booleans in the dict...
    config_dict['bdisk']['user'] = config['bdisk'].getboolean('user')
    config_dict['build']['i_am_a_racecar'] = config['build'].getboolean('i_am_a_racecar')
    config_dict['build']['ipxe'] = config['build'].getboolean('ipxe')
    config_dict['build']['sign'] = config['build'].getboolean('sign')
    config_dict['build']['multiarch'] = (config_dict['build']['multiarch']).lower()
    config_dict['ipxe']['iso'] = config['ipxe'].getboolean('iso')
    config_dict['ipxe']['usb'] = config['ipxe'].getboolean('usb')
    config_dict['sync']['git'] = config['sync'].getboolean('git')
    config_dict['sync']['http'] = config['sync'].getboolean('http')
    config_dict['sync']['rsync'] = config['sync'].getboolean('rsync')
    config_dict['sync']['tftp'] = config['sync'].getboolean('tftp')
    config_dict['rsync']['iso'] = config['rsync'].getboolean('iso')
    # Get the version...
    # Two possibilities from git-describe:
    #   ['v3.10', '1', 'gb4a5e40']  -> v3.10r1  (1 commit after tag)
    #   ['v3.10']                   -> v3.10    (sitting on a tag)
    if config_dict['bdisk']['ver'] == '':
        try:
            repo = git.Repo(config_dict['build']['basedir'])
            refs = repo.git.describe(repo.head.commit).split('-')
            if len(refs) >= 2:
                config_dict['bdisk']['ver'] = refs[0] + 'r' + refs[1]
            else:
                config_dict['bdisk']['ver'] = refs[0]
        except:
            exit(('{0}: ERROR: {1} is NOT a valid git repository, and you did not specify bdisk:ver in your build.ini! ' +
                  'Did you perhaps install from a package manager? Please refer to the documentation.').format(datetime.datetime.now(),
                                                                                                              config_dict['build']['basedir']))
    # And the build number.
    # TODO: support tracking builds per version. i.e. in buildnum:
    # v2.51r13:0
    # v2.51r17:3
    if os.path.isfile(config_dict['build']['dlpath'] + '/buildnum'):
        with open(config_dict['build']['dlpath'] + '/buildnum', 'r') as f:
            config_dict['build']['buildnum'] = int(f.readlines()[0])
    else:
        config_dict['build']['buildnum'] = 0
    # But logically we should start the build over at 0 if we don't have any existing ISO's.
    if os.path.isdir(config_dict['build']['isodir']):
        if os.listdir(config_dict['build']['isodir']) == []:
            config_dict['build']['buildnum'] = 0
        # ...or if we don't have any previous builds for this ISO version.
        elif not glob.glob('{0}/*v{1}r*.iso'.format(config_dict['build']['isodir'], config_dict['bdisk']['ver'])):
            config_dict['build']['buildnum'] = 0
    # and build a list of arch(es) we want to build
    if config_dict['build']['multiarch'] in ('','yes','true','1','no','false','0'):
        config_dict['build']['arch'] = ['x86_64','i686']
    elif config_dict['build']['multiarch'] in ('x86_64','64','no32'):
        config_dict['build']['arch'] = ['x86_64']
    elif config_dict['build']['multiarch'] in ('i686','32','no64'):
        config_dict['build']['arch'] = ['i686']
    else:
        exit(('{0}: ERROR: {1} is not a valid value. Check your configuration.').format(
            datetime.datetime.now(),
            config_dict['build']['multiarch']))
    ## VALIDATORS ##
    # Validate bootstrap mirror
    config_dict['src'] = {}
    for a in config_dict['build']['arch']:
        config_dict['src'][a] = config_dict['source_' + a]
        # FIX: was "validatords.ipv6" -- a typo that raised NameError on any
        # mirror that wasn't a valid domain or IPv4 address.
        if (validators.domain(config_dict['src'][a]['mirror']) or validators.ipv4(
                config_dict['src'][a]['mirror']) or validators.ipv6(
                config_dict['src'][a]['mirror'])):
            try:
                getaddrinfo(config_dict['src'][a]['mirror'], None)
            except:
                exit(('{0}: ERROR: {1} does not resolve and cannot be used as a ' +
                      'mirror for the bootstrap tarballs. Check your configuration.').format(
                          datetime.datetime.now(),
                          config_dict['src'][a]['host']))
        config_dict['src'][a]['gpg'] = config['source_' + a].getboolean('gpg')
    # Are we rsyncing? If so, validate the rsync host.
    # Works for IP address too. It does NOT check to see if we can
    # actually *rsync* to it; that'll come later.
    if config_dict['sync']['rsync']:
        if (validators.domain(config_dict['rsync']['host']) or validators.ipv4(
                config_dict['rsync']['host']) or validators.ipv6(
                config_dict['rsync']['host'])):
            try:
                getaddrinfo(config_dict['rsync']['host'], None)
            except:
                exit(('{0}: ERROR: {1} does not resolve and cannot be used for rsyncing.' +
                      'Check your configuration.').format(
                          datetime.datetime.now(),
                          config_dict['rsync']['host']))
        else:
            exit(('{0}: ERROR: {1} is not a valid host and cannot be used for rsyncing.' +
                  'Check your configuration.').format(
                      datetime.datetime.now(),
                      config_dict['rsync']['host']))
    # Validate the URI.
    if config_dict['build']['ipxe']:
        # so this won't validate e.g. custom LAN domains (https://pxeserver/bdisk.php). TODO.
        if not validators.url(config_dict['ipxe']['uri']):
            # FIX: re.match() was called with only the pattern (TypeError);
            # it must be matched against the configured URI.
            if not re.match('^https?://localhost(/.*)?$', config_dict['ipxe']['uri']):
                exit('{0}: ERROR: {1} is not a valid URL/URI. Check your configuration.'.format(
                    datetime.datetime.now(),
                    config_dict['ipxe']['uri']))
    # Validate required paths
    if not os.path.exists(config_dict['build']['basedir'] + '/extra'):
        exit(("{0}: ERROR: {1} does not contain BDisk's core files!" +
              "Check your configuration.").format(
                  datetime.datetime.now(),
                  config_dict['build']['basedir']))
    # Make dirs if they don't exist
    for d in ('archboot', 'isodir', 'mountpt', 'srcdir', 'prepdir'):
        os.makedirs(config_dict['build'][d], exist_ok = True)
    # Make dirs for sync staging if we need to
    for x in ('http', 'tftp'):
        if config_dict['sync'][x]:
            os.makedirs(config_dict[x]['path'], exist_ok = True)
    return(config, config_dict)
|
|
0
bdisk/iPXE.py
Normal file
0
bdisk/iPXE.py
Normal file
304
bdisk/ipxe.py
304
bdisk/ipxe.py
@ -1,304 +0,0 @@
|
|||||||
import os
|
|
||||||
import shutil
|
|
||||||
import re
|
|
||||||
import subprocess
|
|
||||||
import jinja2
|
|
||||||
import git
|
|
||||||
import patch
|
|
||||||
import datetime
|
|
||||||
import humanize
|
|
||||||
import hashlib
|
|
||||||
|
|
||||||
|
|
||||||
def buildIPXE(conf):
    """Fetch, patch, and build iPXE images (BIOS lkrn, UNDI PXE, and EFI).

    Clones the upstream iPXE source, renders the EMBED script template,
    enables IPv6/HTTPS/FTP features in config/general.h, then runs make
    for each target, logging all build output to <dlpath>/ipxe.log.
    """
    build = conf['build']
    ipxe = conf['ipxe']
    templates_dir = build['basedir'] + '/extra/templates'
    ipxe_tpl = templates_dir + '/iPXE'
    srcdir = build['srcdir']
    embedscript = build['dlpath'] + '/EMBED'
    ipxe_src = srcdir + '/ipxe'
    #ipxe_git_uri = 'git://git.ipxe.org/ipxe.git'
    ipxe_git_uri = 'http://git.ipxe.org/ipxe.git'
    print('{0}: [IPXE] Prep/fetch sources...'.format(
        datetime.datetime.now()))
    # Get the source; always start from a fresh clone.
    if os.path.isdir(ipxe_src):
        shutil.rmtree(ipxe_src)
    git.Repo.clone_from(ipxe_git_uri, ipxe_src)
    # Generate the embedded boot script from the template.
    tpl_loader = jinja2.FileSystemLoader(ipxe_tpl)
    env = jinja2.Environment(loader = tpl_loader)
    tpl = env.get_template('EMBED.j2')
    tpl_out = tpl.render(ipxe = ipxe)
    with open(embedscript, 'w+') as f:
        f.write(tpl_out)
    # Feature enabling
    # In config/general.h
    with open('{0}/src/config/general.h'.format(ipxe_src), 'r') as f:
        generalconf = f.read()
    # And in config/console.h (read now; console patching is still TODO below).
    with open('{0}/src/config/console.h'.format(ipxe_src), 'r') as f:
        consoleconf = f.read()
    # Raw strings: identical pattern bytes, but no invalid-escape warnings.
    patterns = ((r'^#undef(\s*NET_PROTO_IPV6.*)$', r'#define\g<1>'), # enable IPv6
                (r'^#undef(\s*DOWNLOAD_PROTO_HTTPS)', r'#define\g<1>'), # enable HTTPS
                (r'^//(#define\s*IMAGE_TRUST_CMD)', r'\g<1>'), # moar HTTPS
                (r'^#undef(\s*DOWNLOAD_PROTO_FTP)', r'#define\g<1>')) # enable FTP
    #('^//(#define\s*CONSOLE_CMD)','\g<1>'), # BROKEN in EFI? TODO. if enable, replace } with , above etc.
    #('^//(#define\s*IMAGE_PNG','\g<1>'), # SAME, broken in EFI? TODO.
    #console = ('^//(#define\s*CONSOLE_VESAFB)','\g<1>') # BROKEN in EFI? TODO.
    # https://stackoverflow.com/a/4427835
    # https://emilics.com/notebook/enblog/p869.html
    # The above methods don't seem to work. it craps out on the pattern matchings
    # so we use tuples instead.
    for x in patterns:
        generalconf = re.sub(x[0], x[1], generalconf, flags=re.MULTILINE)
    with open('{0}/src/config/general.h'.format(ipxe_src), 'w') as f:
        f.write(generalconf)
    # Uncomment when we want to test the above consdict etc.
    #for x in patterns:
    #    consoleconf = re.sub(x[0], x[1], consoleconf, flags=re.MULTILINE)
    #with open('{0}/src/config/console.h'.format(ipxe_src), 'w') as f:
    #    f.write(consoleconf)
    # Now we make!
    cwd = os.getcwd()
    os.chdir(ipxe_src + '/src')
    modenv = os.environ.copy()
    modenv['EMBED'] = embedscript
    #modenv['TRUST'] = ipxe_ssl_ca # TODO: test these
    #modenv['CERT'] = '{0},{1}'.format(ipxe_ssl_ca, ipxe_ssl_crt) # TODO: test these
    #modenv['PRIVKEY'] = ipxe_ssl_ckey # TODO: test these
    build_cmd = {}
    build_cmd['base'] = ['/usr/bin/make',
                        'all',
                        'EMBED={0}'.format(embedscript)]
    # TODO: copy the UNDI stuff/chainloader to tftpboot, if enabled
    build_cmd['undi'] = ['/usr/bin/make',
                        'bin/ipxe.pxe',
                        'EMBED={0}'.format(embedscript)]
    build_cmd['efi'] = ['/usr/bin/make',
                        'bin-i386-efi/ipxe.efi',
                        'bin-x86_64-efi/ipxe.efi',
                        'EMBED={0}'.format(embedscript)]
    # Now we call the commands. (The old DEVNULL handle was never used here
    # and leaked an open fd; all output goes to the log file instead.)
    if os.path.isfile(build['dlpath'] + '/ipxe.log'):
        os.remove(build['dlpath'] + '/ipxe.log')
    print(('{0}: [IPXE] Building iPXE ({1}). PROGRESS: tail -f {2}/ipxe.log ...').format(
        datetime.datetime.now(),
        ipxe_src,
        build['dlpath']))
    with open('{0}/ipxe.log'.format(build['dlpath']), 'a') as f:
        subprocess.call(build_cmd['base'], stdout = f, stderr = subprocess.STDOUT, env=modenv)
        subprocess.call(build_cmd['undi'], stdout = f, stderr = subprocess.STDOUT, env=modenv)
        subprocess.call(build_cmd['efi'], stdout = f, stderr = subprocess.STDOUT, env=modenv)
    print('{0}: [IPXE] Built iPXE image(s) successfully.'.format(datetime.datetime.now()))
    os.chdir(cwd)
||||||
|
|
||||||
def genISO(conf):
    """Build the iPXE "Mini" hybrid BIOS/UEFI bootable ISO.

    Prepares the EFI System Partition image (when x86_64 is targeted),
    stages isolinux for BIOS boot, and assembles everything with xorriso.

    Returns:
        dict: metadata for the built ISO ('Mini' entry with file path,
        sha256 hex digest, human-readable size, type, and format). Empty
        when ipxe['iso'] is falsy.
    """
    build = conf['build']
    bdisk = conf['bdisk']
    ipxe = conf['ipxe']
    arch = build['arch']
    dlpath = build['dlpath']
    isodir = build['isodir']
    isofile = '{0}-{1}-{2}.mini.iso'.format(bdisk['uxname'], bdisk['ver'], build['buildnum'])
    isopath = '{0}/{1}'.format(isodir, isofile)
    chrootdir = build['chrootdir']
    mini = ipxe['iso']
    iso = {}
    srcdir = build['srcdir']
    ipxe_src = srcdir + '/ipxe'
    mountpt = build['mountpt']
    templates_dir = build['basedir'] + '/extra/templates/iPXE/'
    tpl_loader = jinja2.FileSystemLoader(templates_dir)
    env = jinja2.Environment(loader = tpl_loader)
    bootdir = '{0}/ipxe_mini'.format(dlpath)
    efiboot_img = '{0}/EFI/{1}/efiboot.img'.format(bootdir, bdisk['name'])
    innerefi64 = '{0}/src/bin-x86_64-efi/ipxe.efi'.format(ipxe_src)
    efi = False
    # this shouldn't be necessary... if it is, we can revisit this in the future. see "Inner dir" below.
    #innerefi32 = '{0}/src/bin-i386-efi/ipxe.efi'.format(ipxe_src)
    # We only need to do EFI prep if we have UEFI/x86_64 support. See above, but IA64 is dead, Zed.
    if mini and (('x86_64') in arch):
        efi = True
        # EFI prep/building
        print('{0}: [IPXE] UEFI support for Mini ISO...'.format(datetime.datetime.now()))
        if os.path.isdir(bootdir):
            shutil.rmtree(bootdir)
        os.makedirs(os.path.dirname(efiboot_img), exist_ok = True) # FAT32 embedded EFI dir
        os.makedirs('{0}/EFI/boot'.format(bootdir), exist_ok = True) # EFI bootloader binary dir
        # Inner dir (miniboot.img file)
        #sizetotal = 2097152 # 2MB wiggle room. increase this if we add IA64.
        sizetotal = 34603008 # 33MB wiggle room. increase this if we add IA64.
        sizetotal += os.path.getsize(innerefi64)
        sizefiles = ['HashTool', 'PreLoader']
        for f in sizefiles:
            sizetotal += os.path.getsize('{0}/root.x86_64/usr/share/efitools/efi/{1}.efi'.format(
                chrootdir,
                f))
        # These won't be *quite* accurate since it's before the template substitution,
        # but it'll be close enough.
        for (path, dirs, files) in os.walk(templates_dir):
            for file in files:
                sizetotal += os.path.getsize(os.path.join(path, file))
        print("{0}: [IPXE] Creating EFI ESP image {1} ({2})...".format(
            datetime.datetime.now(),
            efiboot_img,
            humanize.naturalsize(sizetotal)))
        if os.path.isfile(efiboot_img):
            os.remove(efiboot_img)
        # Sparse file of the computed size; mkfs.fat formats it below.
        with open(efiboot_img, 'wb+') as f:
            f.truncate(sizetotal)
        # subprocess.DEVNULL avoids leaking an open os.devnull file handle.
        cmd = ['/sbin/mkfs.fat', '-F', '32', '-n', 'iPXE_EFI', efiboot_img]
        subprocess.call(cmd, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT)
        cmd = ['/bin/mount', efiboot_img, mountpt]
        subprocess.call(cmd)
        os.makedirs(mountpt + '/EFI/boot', exist_ok = True) # "Inner" (EFI image)
        #os.makedirs('{0}/EFI/{1}'.format(mountpt, bdisk['name']), exist_ok = True) # "Inner" (EFI image)
        os.makedirs('{0}/boot'.format(bootdir), exist_ok = True) # kernel(s)
        os.makedirs('{0}/loader/entries'.format(bootdir), exist_ok = True) # EFI
        for d in (mountpt, bootdir):
            shutil.copy2(innerefi64, '{0}/EFI/boot/ipxe.efi'.format(d))
        # PreLoader is installed as the default EFI binary name (bootx64.efi);
        # HashTool keeps its own name. Both go into the ESP and the outer tree.
        for f in ('PreLoader.efi', 'HashTool.efi'):
            if f == 'PreLoader.efi':
                fname = 'bootx64.efi'
            else:
                fname = f
            with open('{0}/root.x86_64/usr/share/efitools/efi/{1}'.format(
                                                                chrootdir, f),
                      'rb') as r:
                efibin = r.read()
            with open('{0}/EFI/boot/{1}'.format(mountpt, fname), 'wb') as file:
                file.write(efibin)
            with open('{0}/EFI/boot/{1}'.format(bootdir, fname), 'wb+') as file:
                file.write(efibin)
        # And the systemd efi bootloader.
        with open('{0}/root.x86_64/usr/lib/systemd/boot/efi/systemd-bootx64.efi'.format(
                                                                        chrootdir),
                  'rb') as r:
            with open('{0}/EFI/boot/loader.efi'.format(mountpt), 'wb+') as f:
                f.write(r.read())
        # And loader entries.
        # BUGFIX: exist_ok was previously passed to str.format() instead of
        # os.makedirs(), so an existing dir raised FileExistsError.
        os.makedirs('{0}/loader/entries'.format(mountpt), exist_ok = True)
        for t in ('loader', 'base'):
            if t == 'base':
                name = bdisk['uxname']
                tplpath = '{0}/loader/entries'.format(mountpt)
            else:
                name = t
                tplpath = '{0}/loader'.format(mountpt)
            tpl = env.get_template('EFI/{0}.conf.j2'.format(t))
            tpl_out = tpl.render(build = build, bdisk = bdisk)
            with open('{0}/{1}.conf'.format(tplpath, name), "w+") as f:
                f.write(tpl_out)
        cmd = ['/bin/umount', mountpt]
        subprocess.call(cmd)
        # Outer dir
        outerdir = True
        os.makedirs('{0}/isolinux'.format(bootdir), exist_ok = True) # BIOS
        # Loader entries (outer)
        for t in ('loader', 'base'):
            if t == 'base':
                name = bdisk['uxname']
                tplpath = '{0}/loader/entries'.format(bootdir)
            else:
                name = t
                tplpath = '{0}/loader'.format(bootdir)
            tpl = env.get_template('EFI/{0}.conf.j2'.format(t))
            tpl_out = tpl.render(build = build, bdisk = bdisk, outerdir = outerdir)
            with open('{0}/{1}.conf'.format(tplpath, name), "w+") as f:
                f.write(tpl_out)
    if mini:
        # BIOS prepping
        shutil.copy2('{0}/src/bin/ipxe.lkrn'.format(ipxe_src), '{0}/boot/ipxe.krn'.format(bootdir))
        isolinux_filelst = ['isolinux.bin',
                            'ldlinux.c32']
        os.makedirs('{0}/isolinux'.format(bootdir), exist_ok = True)
        for f in isolinux_filelst:
            shutil.copy2('{0}/root.{1}/usr/lib/syslinux/bios/{2}'.format(chrootdir, arch[0], f), '{0}/isolinux/{1}'.format(bootdir, f))
        tpl = env.get_template('BIOS/isolinux.cfg.j2')
        tpl_out = tpl.render(build = build, bdisk = bdisk)
        with open('{0}/isolinux/isolinux.cfg'.format(bootdir), "w+") as f:
            f.write(tpl_out)
        print("{0}: [IPXE] Building Mini ISO ({1})...".format(datetime.datetime.now(), isopath))
        if efi:
            cmd = ['/usr/bin/xorriso',
                    '-as', 'mkisofs',
                    '-iso-level', '3',
                    '-full-iso9660-filenames',
                    '-volid', bdisk['name'] + '_MINI',
                    '-appid', bdisk['desc'],
                    '-publisher', bdisk['dev'],
                    '-preparer', 'prepared by ' + bdisk['dev'],
                    '-eltorito-boot', 'isolinux/isolinux.bin',
                    '-eltorito-catalog', 'isolinux/boot.cat',
                    '-no-emul-boot',
                    '-boot-load-size', '4',
                    '-boot-info-table',
                    '-isohybrid-mbr', '{0}/root.{1}/usr/lib/syslinux/bios/isohdpfx.bin'.format(chrootdir, arch[0]),
                    '-eltorito-alt-boot',
                    '-e', 'EFI/{0}/{1}'.format(bdisk['name'], os.path.basename(efiboot_img)),
                    '-no-emul-boot',
                    '-isohybrid-gpt-basdat',
                    '-output', isopath,
                    bootdir]
        else:
            # UNTESTED. TODO.
            # I think i want to also get rid of: -boot-load-size 4,
            # -boot-info-table, ... possiblyyy -isohybrid-gpt-basedat...
            # https://wiki.archlinux.org/index.php/Unified_Extensible_Firmware_Interface#Remove_UEFI_boot_support_from_Optical_Media
            cmd = ['/usr/bin/xorriso',
                    '-as', 'mkisofs',
                    '-iso-level', '3',
                    '-full-iso9660-filenames',
                    '-volid', bdisk['name'] + '_MINI',
                    '-appid', bdisk['desc'],
                    '-publisher', bdisk['dev'],
                    '-preparer', 'prepared by ' + bdisk['dev'],
                    '-eltorito-boot', 'isolinux/isolinux.bin',
                    '-eltorito-catalog', 'isolinux/boot.cat',
                    '-no-emul-boot',
                    '-boot-load-size', '4',
                    '-boot-info-table',
                    '-isohybrid-mbr', '{0}/root.{1}/usr/lib/syslinux/bios/isohdpfx.bin'.format(chrootdir, arch[0]),
                    '-no-emul-boot',
                    '-isohybrid-gpt-basdat',
                    '-output', isopath,
                    bootdir]
        subprocess.call(cmd, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT)
        # Get size/checksum of the finished ISO (hash in 64kb chunks).
        iso['name'] = ['Mini']
        iso['Mini'] = {}
        iso['Mini']['sha'] = hashlib.sha256()
        with open(isopath, 'rb') as f:
            while True:
                stream = f.read(65536) # 64kb chunks
                if not stream:
                    break
                iso['Mini']['sha'].update(stream)
        iso['Mini']['sha'] = iso['Mini']['sha'].hexdigest()
        iso['Mini']['file'] = isopath
        iso['Mini']['size'] = humanize.naturalsize(os.path.getsize(isopath))
        iso['Mini']['type'] = 'Mini'
        iso['Mini']['fmt'] = 'Hybrid ISO'
    return(iso)
|
|
||||||
|
|
||||||
def tftpbootEnv(conf):
    """Stage the TFTP boot environment (currently a stub)."""
    build = conf['build']
    ipxe = conf['ipxe']
    sync = conf['sync']
    if not sync['tftp']:
        return
    # TODO: generate a pxelinux.cfg in bdisk/tftp.py (to write) and sync in the ipxe chainloader here
|
|
0
bdisk/main.py
Normal file
0
bdisk/main.py
Normal file
375
bdisk/prep.py
375
bdisk/prep.py
@ -1,375 +0,0 @@
|
|||||||
import os
|
|
||||||
import shutil
|
|
||||||
import re
|
|
||||||
import hashlib
|
|
||||||
import tarfile
|
|
||||||
import subprocess
|
|
||||||
import re
|
|
||||||
import jinja2
|
|
||||||
import datetime
|
|
||||||
import humanize
|
|
||||||
from urllib.request import urlopen
|
|
||||||
import host # bdisk.host
|
|
||||||
import bGPG # bdisk.bGPG
|
|
||||||
|
|
||||||
|
|
||||||
def dirChk(conf):
    """Create every working directory the build needs, if missing."""
    build_paths = conf['build']
    for key in ('archboot', 'isodir', 'mountpt', 'srcdir', 'prepdir'):
        os.makedirs(build_paths[key], exist_ok = True)
    # Staging dirs are only needed for sync targets that are enabled.
    for proto in ('http', 'tftp'):
        if conf['sync'][proto]:
            os.makedirs(conf[proto]['path'], exist_ok = True)
|
|
||||||
|
|
||||||
def downloadTarball(conf):
    """Fetch the root-filesystem tarball for each architecture.

    For each arch: downloads the upstream checksum list (if configured),
    determines the tarball filename (explicit config or regex match against
    the checksum list), downloads the tarball unless a local copy already
    exists, verifies its checksum, and optionally verifies a detached GPG
    signature via bGPG.gpgVerify().

    Returns:
        dict: arch -> local tarball path (e.g. <dlpath>/.latest.x86_64.tar).
    """
    build = conf['build']
    dlpath = build['dlpath']
    arch = build['arch']
    tarball_path = {}
    for a in arch:
        locsrc = conf['source_' + a]
        mirror = locsrc['mirrorproto'] + '://' + locsrc['mirror']
        rlsdir = mirror + locsrc['mirrorpath']
        hash_dict = None
        if locsrc['mirrorchksum'] != '':
            if locsrc['chksumtype'] == '':
                exit("{0}: source_{1}:chksumtype is unset!".format(datetime.datetime.now(), a))
            hash_type = locsrc['chksumtype']
            hash_in = urlopen(mirror + locsrc['mirrorchksum'])
            hashsums = hash_in.read()
            hash_in.close()
            hash_raw = hashsums.decode("utf-8")
            hash_list = list(filter(None, hash_raw.split('\n')))
            # "<digest> <filename>" lines -> {filename: digest}
            hash_dict = {x.split()[1]: x.split()[0] for x in hash_list}
        # we use .gnupg since we'll need it later.
        os.makedirs(dlpath + '/.gnupg', exist_ok = True)
        tarball_path[a] = dlpath + '/.latest.' + a + '.tar'
        pattern = re.compile(r'^.*' + a + r'\.tar(\.(gz|bz2|xz))?$')
        if locsrc['mirrorfile'] != '':
            tarball = locsrc['mirrorfile']
        elif hash_dict is not None:
            # Pick the tarball name out of the checksum listing.
            tarball = [filename.group(0) for l in list(hash_dict.keys())
                       for filename in [pattern.search(l)] if filename][0]
        else:
            # BUGFIX: previously a NameError (hash_dict undefined); fail with
            # a clear message instead.
            exit("{0}: source_{1} must set mirrorfile or mirrorchksum!".format(
                datetime.datetime.now(), a))
        if locsrc['mirrorchksum'] != '':
            hashsum = hash_dict[tarball]
        if not os.path.isfile(tarball_path[a]):
            # fetch the tarball...
            print("{0}: [PREP] Fetching tarball ({1} architecture)...".format(
                datetime.datetime.now(),
                a))
            tarball_dl = urlopen(rlsdir + tarball)
            with open(tarball_path[a], 'wb') as f:
                f.write(tarball_dl.read())
            tarball_dl.close()
            print("{0}: [PREP] Done fetching {1} ({2}).".format(
                datetime.datetime.now(),
                tarball_path[a],
                humanize.naturalsize(
                    os.path.getsize(tarball_path[a]))))
        if locsrc['mirrorchksum'] != '':
            print("{0}: [PREP] Checking hash checksum {1} against {2}...".format(
                datetime.datetime.now(),
                hashsum,
                tarball_path[a]))
            # Calculate the checksum according to type specified.
            if hash_type not in hashlib.algorithms_available:
                exit("{0}: source_{1}:chksumtype '{2}' is not supported on this machine!".format(
                    datetime.datetime.now(),
                    a,
                    hash_type))
            # Hash in 64 KiB chunks instead of slurping the whole tarball
            # into memory.
            hasher = hashlib.new(hash_type)
            with open(tarball_path[a], 'rb') as f:
                for chunk in iter(lambda: f.read(65536), b''):
                    hasher.update(chunk)
            tarball_hash = hasher.hexdigest()
            if tarball_hash != hashsum:
                exit(("{0}: {1} either did not download correctly\n\t\t\t or a wrong (probably old) version exists on the filesystem.\n\t\t\t " +
                      "Please delete it and try again.").format(datetime.datetime.now(), tarball))
        if locsrc['mirrorgpgsig'] != '':
            # let's verify the signature.
            if locsrc['mirrorgpgsig'] == '.sig':
                gpgsig_remote = rlsdir + tarball + '.sig'
            else:
                gpgsig_remote = locsrc['mirrorgpgsig']
            sig_dl = urlopen(gpgsig_remote)
            sig = tarball_path[a] + '.sig'
            with open(sig, 'wb+') as f:
                f.write(sig_dl.read())
            sig_dl.close()
            gpg_verify = bGPG.gpgVerify(sig, tarball_path[a], conf)
            if not gpg_verify:
                exit("{0}: There was a failure checking {1} against {2}. Please investigate.".format(
                    datetime.datetime.now(),
                    sig,
                    tarball_path[a]))
    return(tarball_path)
|
|
||||||
|
|
||||||
def unpackTarball(tarball_path, build, keep = False):
    """Extract the root-filesystem tarball(s) into the chroot directory.

    Args:
        tarball_path: dict mapping arch name -> local tarball path.
        build: the conf['build'] dict (uses 'chrootdir' and 'arch').
        keep: if True, reuse an existing chroot and skip extraction.
    """
    chrootdir = build['chrootdir']
    if os.path.isdir(chrootdir):
        if not keep:
            # Wipe any stale chroot before re-extracting.
            shutil.rmtree(chrootdir, ignore_errors = True)
            os.makedirs(chrootdir, exist_ok = True)
    else:
        os.makedirs(chrootdir, exist_ok = True)
    # Open and extract the tarball
    if not keep:
        for a in build['arch']:
            print("{0}: [PREP] Extracting tarball {1} ({2})...".format(
                datetime.datetime.now(),
                tarball_path[a],
                humanize.naturalsize(
                    os.path.getsize(tarball_path[a]))))
            # 'r:*' auto-detects the compression; the mirror filename pattern
            # allows .tar, .tar.gz, .tar.bz2, and .tar.xz, so hard-coding
            # 'r:gz' broke everything but gzip. The context manager also
            # guarantees the archive is closed on error.
            with tarfile.open(tarball_path[a], 'r:*') as tar:
                tar.extractall(path = chrootdir)
            print("{0}: [PREP] Extraction for {1} finished.".format(datetime.datetime.now(), tarball_path[a]))
|
|
||||||
|
|
||||||
def buildChroot(conf, keep = False):
    """Download/unpack the root tarball(s), then lay the pre-build.d overlay
    (generic plus per-arch) into each chroot, with everything owned by root.
    """
    build = conf['build']
    bdisk = conf['bdisk']
    user = conf['user']
    dlpath = build['dlpath']
    chrootdir = build['chrootdir']
    arch = build['arch']
    extradir = build['basedir'] + '/extra'
    unpack_me = unpackTarball(downloadTarball(conf), build, keep)
    # build dict of lists of files and dirs from pre-build.d dir, do the same with arch-specific changes.
    prebuild_overlay = {}
    prebuild_arch_overlay = {}
    for x in arch:
        prebuild_arch_overlay[x] = {}
        for y in ['files', 'dirs']:
            prebuild_overlay[y] = []
            prebuild_arch_overlay[x][y] = []
    # Collect every dir and file path under extra/pre-build.d/.
    for path, dirs, files in os.walk('{0}/pre-build.d/'.format(extradir)):
        prebuild_overlay['dirs'].append('{0}/'.format(path))
        for file in files:
            prebuild_overlay['files'].append(os.path.join(path, file))
    for x in prebuild_overlay.keys():
        # Strip the pre-build.d prefix so entries become chroot-relative,
        # dropping any empty strings that result.
        prebuild_overlay[x][:] = [re.sub('^{0}/pre-build.d/'.format(extradir), '', s) for s in prebuild_overlay[x]]
        prebuild_overlay[x] = list(filter(None, prebuild_overlay[x]))
        for y in prebuild_arch_overlay.keys():
            # Entries living under an arch-named subdir (e.g. "x86_64/...")
            # are split out into the per-arch overlay, with the arch prefix
            # removed.
            prebuild_arch_overlay[y][x][:] = [i for i in prebuild_overlay[x] if i.startswith(y)]
            prebuild_arch_overlay[y][x][:] = [re.sub('^{0}/'.format(y), '', s) for s in prebuild_arch_overlay[y][x]]
            prebuild_arch_overlay[y][x] = list(filter(None, prebuild_arch_overlay[y][x]))
        # The generic overlay keeps only entries NOT specific to an arch.
        prebuild_overlay[x][:] = [y for y in prebuild_overlay[x] if not y.startswith(('x86_64','i686'))]
    prebuild_overlay['dirs'].remove('/')
    # create the dir structure. these should almost definitely be owned by root.
    for a in arch:
        for dir in prebuild_overlay['dirs']:
            os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
        # and copy over the files. again, chown to root.
        for file in prebuild_overlay['files']:
            shutil.copy2('{0}/pre-build.d/{1}'.format(extradir, file),
                         '{0}/root.{1}/{2}'.format(chrootdir, a, file), follow_symlinks = False)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
        # do the same for arch-specific stuff.
        for dir in prebuild_arch_overlay[a]['dirs']:
            os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
        for file in prebuild_arch_overlay[a]['files']:
            shutil.copy2('{0}/pre-build.d/{1}/{2}'.format(extradir, a, file),
                         '{0}/root.{1}/{2}'.format(chrootdir, a, file), follow_symlinks = False)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
|
|
||||||
|
|
||||||
def prepChroot(conf):
    """Populate the chroot(s) with build metadata and templated overlays.

    Bumps the build number, writes VERSION_INFO.txt (from template) into
    each chroot and the prep dir, copies the GPG pubkey in, and renders
    the extra/templates/pre-build.d templates (generic and per-arch) into
    each arch's chroot, owned by root.

    Returns:
        dict: the updated conf['build'] dict.
    """
    build = conf['build']
    bdisk = conf['bdisk']
    user = conf['user']
    chrootdir = build['chrootdir']
    prepdir = build['prepdir']
    arch = build['arch']
    bdisk_repo_dir = build['basedir']
    dlpath = build['dlpath']
    templates_dir = bdisk_repo_dir + '/extra/templates'
    ## let's prep some variables to write out the version info.txt
    # and these should be passed in from the args, for the most part.
    build['name'] = bdisk['name']
    build['time'] = datetime.datetime.utcnow().strftime("%a %b %d %H:%M:%S UTC %Y")
    build['user'] = os.environ['USER']
    if 'SUDO_USER' in os.environ:
        build['realuser'] = os.environ['SUDO_USER']
    build['buildnum'] += 1
    with open(dlpath + '/buildnum', 'w+') as f:
        f.write(str(build['buildnum']) + "\n")
    # and now that we have that dict, let's write out the VERSION_INFO.txt file.
    loader = jinja2.FileSystemLoader(templates_dir)
    env = jinja2.Environment(loader = loader)
    tpl = env.get_template('VERSION_INFO.txt.j2')
    tpl_out = tpl.render(build = build, bdisk = bdisk, hostname = host.getHostname(), distro = host.getOS())
    for a in arch:
        # Copy the GPG pubkey
        shutil.copy2('{0}/gpgkey.pub'.format(dlpath), '{0}/root.{1}/root/pubkey.gpg'.format(chrootdir, a))
        # Write the VERSION_INFO.txt from template
        with open('{0}/root.{1}/root/VERSION_INFO.txt'.format(chrootdir, a), 'w+') as f:
            f.write(tpl_out)
    with open('{0}/VERSION_INFO.txt'.format(prepdir), 'w+') as f:
        f.write(tpl_out)
    # And perform the templating overlays
    templates_overlay = {}
    templates_arch_overlay = {}
    for x in arch:
        templates_arch_overlay[x] = {}
        for y in ['files', 'dirs']:
            templates_overlay[y] = []
            templates_arch_overlay[x][y] = []
    for path, dirs, files in os.walk('{0}/pre-build.d'.format(templates_dir)):
        for dir in dirs:
            templates_overlay['dirs'].append('{0}/'.format(dir))
        for file in files:
            templates_overlay['files'].append(os.path.join(path, file))
    for x in templates_overlay.keys():
        # Strip the templates prefix and the .j2 suffix -> chroot-relative names.
        templates_overlay[x][:] = [re.sub(r'^{0}/pre-build.d/(.*)(\.j2)'.format(templates_dir), r'\g<1>', s) for s in templates_overlay[x]]
        templates_overlay[x] = list(filter(None, templates_overlay[x]))
        for y in templates_arch_overlay.keys():
            # Entries under an arch-named subdir go to the per-arch overlay.
            templates_arch_overlay[y][x][:] = [i for i in templates_overlay[x] if i.startswith(y)]
            templates_arch_overlay[y][x][:] = [re.sub(r'^{0}/(.*)(\.j2)'.format(y), r'\g<1>', s) for s in templates_arch_overlay[y][x]]
            templates_arch_overlay[y][x][:] = [re.sub('^{0}/'.format(y), '', s) for s in templates_arch_overlay[y][x]]
            templates_arch_overlay[y][x] = list(filter(None, templates_arch_overlay[y][x]))
        templates_overlay[x][:] = [y for y in templates_overlay[x] if not y.startswith(('x86_64', 'i686'))]
    if '/' in templates_overlay['dirs']:
        templates_overlay['dirs'].remove('/')
    # BUGFIX: signkey was only bound when build['gpg'] was truthy, but it is
    # referenced unconditionally in the tpl.render() calls below, which
    # raised NameError for non-GPG builds. Default to None so templates can
    # test for it.
    signkey = None
    if build['gpg']:
        gpg = conf['gpgobj']
        if conf['gpg']['mygpgkey']:
            signkey = conf['gpg']['mygpgkey']
        else:
            signkey = str(gpg.signers[0].subkeys[0].fpr)
    # create the dir structure. these should almost definitely be owned by root.
    for a in arch:
        for dir in templates_overlay['dirs']:
            os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
        # and write the files. again, chown to root.
        for file in templates_overlay['files']:
            tplname = 'pre-build.d/{0}.j2'.format(file)
            tpl = env.get_template(tplname)
            tpl_out = tpl.render(build = build, bdisk = bdisk, mygpgkey = signkey, user = user)
            with open('{0}/root.{1}/{2}'.format(chrootdir, a, file), 'w') as f:
                f.write(tpl_out)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
        # do the same for arch-specific stuff.
        for dir in templates_arch_overlay[a]['dirs']:
            os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
        for file in templates_arch_overlay[a]['files']:
            tplname = 'pre-build.d/{0}/{1}.j2'.format(a, file)
            tpl = env.get_template('{0}'.format(tplname))
            tpl_out = tpl.render(build = build, bdisk = bdisk, mygpgkey = signkey)
            with open('{0}/root.{1}/{2}'.format(chrootdir, a, file), 'w') as f:
                f.write(tpl_out)
            os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
    return(build)
|
|
||||||
|
|
||||||
def postChroot(conf):
|
|
||||||
build = conf['build']
|
|
||||||
bdisk = conf['bdisk']
|
|
||||||
dlpath = build['dlpath']
|
|
||||||
chrootdir = build['chrootdir']
|
|
||||||
arch = build['arch']
|
|
||||||
overdir = build['basedir'] + '/overlay/'
|
|
||||||
templates_dir = '{0}/extra/templates'.format(build['basedir'])
|
|
||||||
loader = jinja2.FileSystemLoader(templates_dir)
|
|
||||||
env = jinja2.Environment(loader = loader)
|
|
||||||
postbuild_overlay = {}
|
|
||||||
postbuild_arch_overlay = {}
|
|
||||||
for x in arch:
|
|
||||||
os.remove('{0}/root.{1}/README'.format(chrootdir, x))
|
|
||||||
postbuild_arch_overlay[x] = {}
|
|
||||||
for y in ['files', 'dirs']:
|
|
||||||
postbuild_overlay[y] = []
|
|
||||||
postbuild_arch_overlay[x][y] = []
|
|
||||||
for path, dirs, files in os.walk(overdir):
|
|
||||||
postbuild_overlay['dirs'].append('{0}/'.format(path))
|
|
||||||
for file in files:
|
|
||||||
postbuild_overlay['files'].append(os.path.join(path, file))
|
|
||||||
for x in postbuild_overlay.keys():
|
|
||||||
postbuild_overlay[x][:] = [re.sub('^' + overdir, '', s) for s in postbuild_overlay[x]]
|
|
||||||
postbuild_overlay[x] = list(filter(None, postbuild_overlay[x]))
|
|
||||||
for y in postbuild_arch_overlay.keys():
|
|
||||||
postbuild_arch_overlay[y][x][:] = [i for i in postbuild_overlay[x] if i.startswith(y)]
|
|
||||||
postbuild_arch_overlay[y][x][:] = [re.sub('^' + y + '/', '', s) for s in postbuild_arch_overlay[y][x]]
|
|
||||||
postbuild_arch_overlay[y][x] = list(filter(None, postbuild_arch_overlay[y][x]))
|
|
||||||
postbuild_overlay[x][:] = [y for y in postbuild_overlay[x] if not y.startswith(('x86_64','i686'))]
|
|
||||||
postbuild_overlay['dirs'].remove('/')
|
|
||||||
# create the dir structure. these should almost definitely be owned by root.
|
|
||||||
for a in arch:
|
|
||||||
for dir in postbuild_overlay['dirs']:
|
|
||||||
os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
|
|
||||||
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0, follow_symlinks = False)
|
|
||||||
# and copy over the files. again, chown to root.
|
|
||||||
for file in postbuild_overlay['files']:
|
|
||||||
shutil.copy2(overdir + file, '{0}/root.{1}/{2}'.format(chrootdir, a, file), follow_symlinks = False)
|
|
||||||
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
|
|
||||||
# do the same for arch-specific stuff.
|
|
||||||
for dir in postbuild_arch_overlay[a]['dirs']:
|
|
||||||
os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
|
|
||||||
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0, follow_symlinks = False)
|
|
||||||
for file in postbuild_arch_overlay[a]['files']:
|
|
||||||
shutil.copy2('{0}{1}/{2}'.format(overdir, a, file),
|
|
||||||
'{0}/root.{1}/{2}'.format(chrootdir, a, file),
|
|
||||||
follow_symlinks = False)
|
|
||||||
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
|
|
||||||
# And perform the templating overlays
|
|
||||||
templates_overlay = {}
|
|
||||||
templates_arch_overlay = {}
|
|
||||||
for x in arch:
|
|
||||||
templates_arch_overlay[x] = {}
|
|
||||||
for y in ['files', 'dirs']:
|
|
||||||
templates_overlay[y] = []
|
|
||||||
templates_arch_overlay[x][y] = []
|
|
||||||
for path, dirs, files in os.walk('{0}/overlay'.format(templates_dir)):
|
|
||||||
for dir in dirs:
|
|
||||||
templates_overlay['dirs'].append('{0}/'.format(dir))
|
|
||||||
for file in files:
|
|
||||||
templates_overlay['files'].append(os.path.join(path, file))
|
|
||||||
for x in templates_overlay.keys():
|
|
||||||
templates_overlay[x][:] = [re.sub('^{0}/overlay/(.*)(\.j2)'.format(templates_dir), '\g<1>', s) for s in templates_overlay[x]]
|
|
||||||
templates_overlay[x] = list(filter(None, templates_overlay[x]))
|
|
||||||
for y in templates_arch_overlay.keys():
|
|
||||||
templates_arch_overlay[y][x][:] = [i for i in templates_overlay[x] if i.startswith(y)]
|
|
||||||
templates_arch_overlay[y][x][:] = [re.sub('^{0}/(.*)(\.j2)'.format(y), '\g<1>', s) for s in templates_arch_overlay[y][x]]
|
|
||||||
templates_arch_overlay[y][x][:] = [re.sub('^{0}/'.format(y), '', s) for s in templates_arch_overlay[y][x]]
|
|
||||||
templates_arch_overlay[y][x] = list(filter(None, templates_arch_overlay[y][x]))
|
|
||||||
templates_overlay[x][:] = [y for y in templates_overlay[x] if not y.startswith(('x86_64','i686'))]
|
|
||||||
if '/' in templates_overlay['dirs']:
|
|
||||||
templates_overlay['dirs'].remove('/')
|
|
||||||
# create the dir structure. these should almost definitely be owned by root.
|
|
||||||
if build['gpg']:
|
|
||||||
gpg = conf['gpgobj']
|
|
||||||
if conf['gpg']['mygpgkey']:
|
|
||||||
signkey = conf['gpg']['mygpgkey']
|
|
||||||
else:
|
|
||||||
signkey = str(gpg.signers[0].subkeys[0].fpr)
|
|
||||||
for a in arch:
|
|
||||||
for dir in templates_overlay['dirs']:
|
|
||||||
os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
|
|
||||||
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
|
|
||||||
# and write the files. again, chown to root.
|
|
||||||
for file in templates_overlay['files']:
|
|
||||||
tplname = 'overlay/{0}.j2'.format(file)
|
|
||||||
tpl = env.get_template(tplname)
|
|
||||||
tpl_out = tpl.render(build = build, bdisk = bdisk, mygpgkey = signkey)
|
|
||||||
with open('{0}/root.{1}/{2}'.format(chrootdir, a, file), 'w') as f:
|
|
||||||
f.write(tpl_out)
|
|
||||||
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
|
|
||||||
# do the same for arch-specific stuff.
|
|
||||||
for dir in templates_arch_overlay[a]['dirs']:
|
|
||||||
os.makedirs('{0}/root.{1}/{2}'.format(chrootdir, a, dir), exist_ok = True)
|
|
||||||
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, dir), 0, 0)
|
|
||||||
for file in templates_arch_overlay[a]['files']:
|
|
||||||
tplname = 'overlay/{0}/{1}.j2'.format(a, file)
|
|
||||||
tpl = env.get_template(tplname)
|
|
||||||
tpl_out = tpl.render(build = build, bdisk = bdisk, mygpgkey = signkey)
|
|
||||||
with open('{0}/root.{1}/{2}'.format(chrootdir, a, file), 'w') as f:
|
|
||||||
f.write(tpl_out)
|
|
||||||
os.chown('{0}/root.{1}/{2}'.format(chrootdir, a, file), 0, 0, follow_symlinks = False)
|
|
0
bdisk/sync.py
Normal file
0
bdisk/sync.py
Normal file
1
bdisk/version.py
Normal file
1
bdisk/version.py
Normal file
@ -0,0 +1 @@
|
|||||||
|
BDISK_VERSION = '4.0.0a1'
|
674
docs/COPYING
674
docs/COPYING
@ -1,674 +0,0 @@
|
|||||||
GNU GENERAL PUBLIC LICENSE
|
|
||||||
Version 3, 29 June 2007
|
|
||||||
|
|
||||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
|
||||||
Everyone is permitted to copy and distribute verbatim copies
|
|
||||||
of this license document, but changing it is not allowed.
|
|
||||||
|
|
||||||
Preamble
|
|
||||||
|
|
||||||
The GNU General Public License is a free, copyleft license for
|
|
||||||
software and other kinds of works.
|
|
||||||
|
|
||||||
The licenses for most software and other practical works are designed
|
|
||||||
to take away your freedom to share and change the works. By contrast,
|
|
||||||
the GNU General Public License is intended to guarantee your freedom to
|
|
||||||
share and change all versions of a program--to make sure it remains free
|
|
||||||
software for all its users. We, the Free Software Foundation, use the
|
|
||||||
GNU General Public License for most of our software; it applies also to
|
|
||||||
any other work released this way by its authors. You can apply it to
|
|
||||||
your programs, too.
|
|
||||||
|
|
||||||
When we speak of free software, we are referring to freedom, not
|
|
||||||
price. Our General Public Licenses are designed to make sure that you
|
|
||||||
have the freedom to distribute copies of free software (and charge for
|
|
||||||
them if you wish), that you receive source code or can get it if you
|
|
||||||
want it, that you can change the software or use pieces of it in new
|
|
||||||
free programs, and that you know you can do these things.
|
|
||||||
|
|
||||||
To protect your rights, we need to prevent others from denying you
|
|
||||||
these rights or asking you to surrender the rights. Therefore, you have
|
|
||||||
certain responsibilities if you distribute copies of the software, or if
|
|
||||||
you modify it: responsibilities to respect the freedom of others.
|
|
||||||
|
|
||||||
For example, if you distribute copies of such a program, whether
|
|
||||||
gratis or for a fee, you must pass on to the recipients the same
|
|
||||||
freedoms that you received. You must make sure that they, too, receive
|
|
||||||
or can get the source code. And you must show them these terms so they
|
|
||||||
know their rights.
|
|
||||||
|
|
||||||
Developers that use the GNU GPL protect your rights with two steps:
|
|
||||||
(1) assert copyright on the software, and (2) offer you this License
|
|
||||||
giving you legal permission to copy, distribute and/or modify it.
|
|
||||||
|
|
||||||
For the developers' and authors' protection, the GPL clearly explains
|
|
||||||
that there is no warranty for this free software. For both users' and
|
|
||||||
authors' sake, the GPL requires that modified versions be marked as
|
|
||||||
changed, so that their problems will not be attributed erroneously to
|
|
||||||
authors of previous versions.
|
|
||||||
|
|
||||||
Some devices are designed to deny users access to install or run
|
|
||||||
modified versions of the software inside them, although the manufacturer
|
|
||||||
can do so. This is fundamentally incompatible with the aim of
|
|
||||||
protecting users' freedom to change the software. The systematic
|
|
||||||
pattern of such abuse occurs in the area of products for individuals to
|
|
||||||
use, which is precisely where it is most unacceptable. Therefore, we
|
|
||||||
have designed this version of the GPL to prohibit the practice for those
|
|
||||||
products. If such problems arise substantially in other domains, we
|
|
||||||
stand ready to extend this provision to those domains in future versions
|
|
||||||
of the GPL, as needed to protect the freedom of users.
|
|
||||||
|
|
||||||
Finally, every program is threatened constantly by software patents.
|
|
||||||
States should not allow patents to restrict development and use of
|
|
||||||
software on general-purpose computers, but in those that do, we wish to
|
|
||||||
avoid the special danger that patents applied to a free program could
|
|
||||||
make it effectively proprietary. To prevent this, the GPL assures that
|
|
||||||
patents cannot be used to render the program non-free.
|
|
||||||
|
|
||||||
The precise terms and conditions for copying, distribution and
|
|
||||||
modification follow.
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
0. Definitions.
|
|
||||||
|
|
||||||
"This License" refers to version 3 of the GNU General Public License.
|
|
||||||
|
|
||||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
|
||||||
works, such as semiconductor masks.
|
|
||||||
|
|
||||||
"The Program" refers to any copyrightable work licensed under this
|
|
||||||
License. Each licensee is addressed as "you". "Licensees" and
|
|
||||||
"recipients" may be individuals or organizations.
|
|
||||||
|
|
||||||
To "modify" a work means to copy from or adapt all or part of the work
|
|
||||||
in a fashion requiring copyright permission, other than the making of an
|
|
||||||
exact copy. The resulting work is called a "modified version" of the
|
|
||||||
earlier work or a work "based on" the earlier work.
|
|
||||||
|
|
||||||
A "covered work" means either the unmodified Program or a work based
|
|
||||||
on the Program.
|
|
||||||
|
|
||||||
To "propagate" a work means to do anything with it that, without
|
|
||||||
permission, would make you directly or secondarily liable for
|
|
||||||
infringement under applicable copyright law, except executing it on a
|
|
||||||
computer or modifying a private copy. Propagation includes copying,
|
|
||||||
distribution (with or without modification), making available to the
|
|
||||||
public, and in some countries other activities as well.
|
|
||||||
|
|
||||||
To "convey" a work means any kind of propagation that enables other
|
|
||||||
parties to make or receive copies. Mere interaction with a user through
|
|
||||||
a computer network, with no transfer of a copy, is not conveying.
|
|
||||||
|
|
||||||
An interactive user interface displays "Appropriate Legal Notices"
|
|
||||||
to the extent that it includes a convenient and prominently visible
|
|
||||||
feature that (1) displays an appropriate copyright notice, and (2)
|
|
||||||
tells the user that there is no warranty for the work (except to the
|
|
||||||
extent that warranties are provided), that licensees may convey the
|
|
||||||
work under this License, and how to view a copy of this License. If
|
|
||||||
the interface presents a list of user commands or options, such as a
|
|
||||||
menu, a prominent item in the list meets this criterion.
|
|
||||||
|
|
||||||
1. Source Code.
|
|
||||||
|
|
||||||
The "source code" for a work means the preferred form of the work
|
|
||||||
for making modifications to it. "Object code" means any non-source
|
|
||||||
form of a work.
|
|
||||||
|
|
||||||
A "Standard Interface" means an interface that either is an official
|
|
||||||
standard defined by a recognized standards body, or, in the case of
|
|
||||||
interfaces specified for a particular programming language, one that
|
|
||||||
is widely used among developers working in that language.
|
|
||||||
|
|
||||||
The "System Libraries" of an executable work include anything, other
|
|
||||||
than the work as a whole, that (a) is included in the normal form of
|
|
||||||
packaging a Major Component, but which is not part of that Major
|
|
||||||
Component, and (b) serves only to enable use of the work with that
|
|
||||||
Major Component, or to implement a Standard Interface for which an
|
|
||||||
implementation is available to the public in source code form. A
|
|
||||||
"Major Component", in this context, means a major essential component
|
|
||||||
(kernel, window system, and so on) of the specific operating system
|
|
||||||
(if any) on which the executable work runs, or a compiler used to
|
|
||||||
produce the work, or an object code interpreter used to run it.
|
|
||||||
|
|
||||||
The "Corresponding Source" for a work in object code form means all
|
|
||||||
the source code needed to generate, install, and (for an executable
|
|
||||||
work) run the object code and to modify the work, including scripts to
|
|
||||||
control those activities. However, it does not include the work's
|
|
||||||
System Libraries, or general-purpose tools or generally available free
|
|
||||||
programs which are used unmodified in performing those activities but
|
|
||||||
which are not part of the work. For example, Corresponding Source
|
|
||||||
includes interface definition files associated with source files for
|
|
||||||
the work, and the source code for shared libraries and dynamically
|
|
||||||
linked subprograms that the work is specifically designed to require,
|
|
||||||
such as by intimate data communication or control flow between those
|
|
||||||
subprograms and other parts of the work.
|
|
||||||
|
|
||||||
The Corresponding Source need not include anything that users
|
|
||||||
can regenerate automatically from other parts of the Corresponding
|
|
||||||
Source.
|
|
||||||
|
|
||||||
The Corresponding Source for a work in source code form is that
|
|
||||||
same work.
|
|
||||||
|
|
||||||
2. Basic Permissions.
|
|
||||||
|
|
||||||
All rights granted under this License are granted for the term of
|
|
||||||
copyright on the Program, and are irrevocable provided the stated
|
|
||||||
conditions are met. This License explicitly affirms your unlimited
|
|
||||||
permission to run the unmodified Program. The output from running a
|
|
||||||
covered work is covered by this License only if the output, given its
|
|
||||||
content, constitutes a covered work. This License acknowledges your
|
|
||||||
rights of fair use or other equivalent, as provided by copyright law.
|
|
||||||
|
|
||||||
You may make, run and propagate covered works that you do not
|
|
||||||
convey, without conditions so long as your license otherwise remains
|
|
||||||
in force. You may convey covered works to others for the sole purpose
|
|
||||||
of having them make modifications exclusively for you, or provide you
|
|
||||||
with facilities for running those works, provided that you comply with
|
|
||||||
the terms of this License in conveying all material for which you do
|
|
||||||
not control copyright. Those thus making or running the covered works
|
|
||||||
for you must do so exclusively on your behalf, under your direction
|
|
||||||
and control, on terms that prohibit them from making any copies of
|
|
||||||
your copyrighted material outside their relationship with you.
|
|
||||||
|
|
||||||
Conveying under any other circumstances is permitted solely under
|
|
||||||
the conditions stated below. Sublicensing is not allowed; section 10
|
|
||||||
makes it unnecessary.
|
|
||||||
|
|
||||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
|
||||||
|
|
||||||
No covered work shall be deemed part of an effective technological
|
|
||||||
measure under any applicable law fulfilling obligations under article
|
|
||||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
|
||||||
similar laws prohibiting or restricting circumvention of such
|
|
||||||
measures.
|
|
||||||
|
|
||||||
When you convey a covered work, you waive any legal power to forbid
|
|
||||||
circumvention of technological measures to the extent such circumvention
|
|
||||||
is effected by exercising rights under this License with respect to
|
|
||||||
the covered work, and you disclaim any intention to limit operation or
|
|
||||||
modification of the work as a means of enforcing, against the work's
|
|
||||||
users, your or third parties' legal rights to forbid circumvention of
|
|
||||||
technological measures.
|
|
||||||
|
|
||||||
4. Conveying Verbatim Copies.
|
|
||||||
|
|
||||||
You may convey verbatim copies of the Program's source code as you
|
|
||||||
receive it, in any medium, provided that you conspicuously and
|
|
||||||
appropriately publish on each copy an appropriate copyright notice;
|
|
||||||
keep intact all notices stating that this License and any
|
|
||||||
non-permissive terms added in accord with section 7 apply to the code;
|
|
||||||
keep intact all notices of the absence of any warranty; and give all
|
|
||||||
recipients a copy of this License along with the Program.
|
|
||||||
|
|
||||||
You may charge any price or no price for each copy that you convey,
|
|
||||||
and you may offer support or warranty protection for a fee.
|
|
||||||
|
|
||||||
5. Conveying Modified Source Versions.
|
|
||||||
|
|
||||||
You may convey a work based on the Program, or the modifications to
|
|
||||||
produce it from the Program, in the form of source code under the
|
|
||||||
terms of section 4, provided that you also meet all of these conditions:
|
|
||||||
|
|
||||||
a) The work must carry prominent notices stating that you modified
|
|
||||||
it, and giving a relevant date.
|
|
||||||
|
|
||||||
b) The work must carry prominent notices stating that it is
|
|
||||||
released under this License and any conditions added under section
|
|
||||||
7. This requirement modifies the requirement in section 4 to
|
|
||||||
"keep intact all notices".
|
|
||||||
|
|
||||||
c) You must license the entire work, as a whole, under this
|
|
||||||
License to anyone who comes into possession of a copy. This
|
|
||||||
License will therefore apply, along with any applicable section 7
|
|
||||||
additional terms, to the whole of the work, and all its parts,
|
|
||||||
regardless of how they are packaged. This License gives no
|
|
||||||
permission to license the work in any other way, but it does not
|
|
||||||
invalidate such permission if you have separately received it.
|
|
||||||
|
|
||||||
d) If the work has interactive user interfaces, each must display
|
|
||||||
Appropriate Legal Notices; however, if the Program has interactive
|
|
||||||
interfaces that do not display Appropriate Legal Notices, your
|
|
||||||
work need not make them do so.
|
|
||||||
|
|
||||||
A compilation of a covered work with other separate and independent
|
|
||||||
works, which are not by their nature extensions of the covered work,
|
|
||||||
and which are not combined with it such as to form a larger program,
|
|
||||||
in or on a volume of a storage or distribution medium, is called an
|
|
||||||
"aggregate" if the compilation and its resulting copyright are not
|
|
||||||
used to limit the access or legal rights of the compilation's users
|
|
||||||
beyond what the individual works permit. Inclusion of a covered work
|
|
||||||
in an aggregate does not cause this License to apply to the other
|
|
||||||
parts of the aggregate.
|
|
||||||
|
|
||||||
6. Conveying Non-Source Forms.
|
|
||||||
|
|
||||||
You may convey a covered work in object code form under the terms
|
|
||||||
of sections 4 and 5, provided that you also convey the
|
|
||||||
machine-readable Corresponding Source under the terms of this License,
|
|
||||||
in one of these ways:
|
|
||||||
|
|
||||||
a) Convey the object code in, or embodied in, a physical product
|
|
||||||
(including a physical distribution medium), accompanied by the
|
|
||||||
Corresponding Source fixed on a durable physical medium
|
|
||||||
customarily used for software interchange.
|
|
||||||
|
|
||||||
b) Convey the object code in, or embodied in, a physical product
|
|
||||||
(including a physical distribution medium), accompanied by a
|
|
||||||
written offer, valid for at least three years and valid for as
|
|
||||||
long as you offer spare parts or customer support for that product
|
|
||||||
model, to give anyone who possesses the object code either (1) a
|
|
||||||
copy of the Corresponding Source for all the software in the
|
|
||||||
product that is covered by this License, on a durable physical
|
|
||||||
medium customarily used for software interchange, for a price no
|
|
||||||
more than your reasonable cost of physically performing this
|
|
||||||
conveying of source, or (2) access to copy the
|
|
||||||
Corresponding Source from a network server at no charge.
|
|
||||||
|
|
||||||
c) Convey individual copies of the object code with a copy of the
|
|
||||||
written offer to provide the Corresponding Source. This
|
|
||||||
alternative is allowed only occasionally and noncommercially, and
|
|
||||||
only if you received the object code with such an offer, in accord
|
|
||||||
with subsection 6b.
|
|
||||||
|
|
||||||
d) Convey the object code by offering access from a designated
|
|
||||||
place (gratis or for a charge), and offer equivalent access to the
|
|
||||||
Corresponding Source in the same way through the same place at no
|
|
||||||
further charge. You need not require recipients to copy the
|
|
||||||
Corresponding Source along with the object code. If the place to
|
|
||||||
copy the object code is a network server, the Corresponding Source
|
|
||||||
may be on a different server (operated by you or a third party)
|
|
||||||
that supports equivalent copying facilities, provided you maintain
|
|
||||||
clear directions next to the object code saying where to find the
|
|
||||||
Corresponding Source. Regardless of what server hosts the
|
|
||||||
Corresponding Source, you remain obligated to ensure that it is
|
|
||||||
available for as long as needed to satisfy these requirements.
|
|
||||||
|
|
||||||
e) Convey the object code using peer-to-peer transmission, provided
|
|
||||||
you inform other peers where the object code and Corresponding
|
|
||||||
Source of the work are being offered to the general public at no
|
|
||||||
charge under subsection 6d.
|
|
||||||
|
|
||||||
A separable portion of the object code, whose source code is excluded
|
|
||||||
from the Corresponding Source as a System Library, need not be
|
|
||||||
included in conveying the object code work.
|
|
||||||
|
|
||||||
A "User Product" is either (1) a "consumer product", which means any
|
|
||||||
tangible personal property which is normally used for personal, family,
|
|
||||||
or household purposes, or (2) anything designed or sold for incorporation
|
|
||||||
into a dwelling. In determining whether a product is a consumer product,
|
|
||||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
|
||||||
product received by a particular user, "normally used" refers to a
|
|
||||||
typical or common use of that class of product, regardless of the status
|
|
||||||
of the particular user or of the way in which the particular user
|
|
||||||
actually uses, or expects or is expected to use, the product. A product
|
|
||||||
is a consumer product regardless of whether the product has substantial
|
|
||||||
commercial, industrial or non-consumer uses, unless such uses represent
|
|
||||||
the only significant mode of use of the product.
|
|
||||||
|
|
||||||
"Installation Information" for a User Product means any methods,
|
|
||||||
procedures, authorization keys, or other information required to install
|
|
||||||
and execute modified versions of a covered work in that User Product from
|
|
||||||
a modified version of its Corresponding Source. The information must
|
|
||||||
suffice to ensure that the continued functioning of the modified object
|
|
||||||
code is in no case prevented or interfered with solely because
|
|
||||||
modification has been made.
|
|
||||||
|
|
||||||
If you convey an object code work under this section in, or with, or
|
|
||||||
specifically for use in, a User Product, and the conveying occurs as
|
|
||||||
part of a transaction in which the right of possession and use of the
|
|
||||||
User Product is transferred to the recipient in perpetuity or for a
|
|
||||||
fixed term (regardless of how the transaction is characterized), the
|
|
||||||
Corresponding Source conveyed under this section must be accompanied
|
|
||||||
by the Installation Information. But this requirement does not apply
|
|
||||||
if neither you nor any third party retains the ability to install
|
|
||||||
modified object code on the User Product (for example, the work has
|
|
||||||
been installed in ROM).
|
|
||||||
|
|
||||||
The requirement to provide Installation Information does not include a
|
|
||||||
requirement to continue to provide support service, warranty, or updates
|
|
||||||
for a work that has been modified or installed by the recipient, or for
|
|
||||||
the User Product in which it has been modified or installed. Access to a
|
|
||||||
network may be denied when the modification itself materially and
|
|
||||||
adversely affects the operation of the network or violates the rules and
|
|
||||||
protocols for communication across the network.
|
|
||||||
|
|
||||||
Corresponding Source conveyed, and Installation Information provided,
|
|
||||||
in accord with this section must be in a format that is publicly
|
|
||||||
documented (and with an implementation available to the public in
|
|
||||||
source code form), and must require no special password or key for
|
|
||||||
unpacking, reading or copying.
|
|
||||||
|
|
||||||
7. Additional Terms.
|
|
||||||
|
|
||||||
"Additional permissions" are terms that supplement the terms of this
|
|
||||||
License by making exceptions from one or more of its conditions.
|
|
||||||
Additional permissions that are applicable to the entire Program shall
|
|
||||||
be treated as though they were included in this License, to the extent
|
|
||||||
that they are valid under applicable law. If additional permissions
|
|
||||||
apply only to part of the Program, that part may be used separately
|
|
||||||
under those permissions, but the entire Program remains governed by
|
|
||||||
this License without regard to the additional permissions.
|
|
||||||
|
|
||||||
When you convey a copy of a covered work, you may at your option
|
|
||||||
remove any additional permissions from that copy, or from any part of
|
|
||||||
it. (Additional permissions may be written to require their own
|
|
||||||
removal in certain cases when you modify the work.) You may place
|
|
||||||
additional permissions on material, added by you to a covered work,
|
|
||||||
for which you have or can give appropriate copyright permission.
|
|
||||||
|
|
||||||
Notwithstanding any other provision of this License, for material you
|
|
||||||
add to a covered work, you may (if authorized by the copyright holders of
|
|
||||||
that material) supplement the terms of this License with terms:
|
|
||||||
|
|
||||||
a) Disclaiming warranty or limiting liability differently from the
|
|
||||||
terms of sections 15 and 16 of this License; or
|
|
||||||
|
|
||||||
b) Requiring preservation of specified reasonable legal notices or
|
|
||||||
author attributions in that material or in the Appropriate Legal
|
|
||||||
Notices displayed by works containing it; or
|
|
||||||
|
|
||||||
c) Prohibiting misrepresentation of the origin of that material, or
|
|
||||||
requiring that modified versions of such material be marked in
|
|
||||||
reasonable ways as different from the original version; or
|
|
||||||
|
|
||||||
d) Limiting the use for publicity purposes of names of licensors or
|
|
||||||
authors of the material; or
|
|
||||||
|
|
||||||
e) Declining to grant rights under trademark law for use of some
|
|
||||||
trade names, trademarks, or service marks; or
|
|
||||||
|
|
||||||
f) Requiring indemnification of licensors and authors of that
|
|
||||||
material by anyone who conveys the material (or modified versions of
|
|
||||||
it) with contractual assumptions of liability to the recipient, for
|
|
||||||
any liability that these contractual assumptions directly impose on
|
|
||||||
those licensors and authors.
|
|
||||||
|
|
||||||
All other non-permissive additional terms are considered "further
|
|
||||||
restrictions" within the meaning of section 10. If the Program as you
|
|
||||||
received it, or any part of it, contains a notice stating that it is
|
|
||||||
governed by this License along with a term that is a further
|
|
||||||
restriction, you may remove that term. If a license document contains
|
|
||||||
a further restriction but permits relicensing or conveying under this
|
|
||||||
License, you may add to a covered work material governed by the terms
|
|
||||||
of that license document, provided that the further restriction does
|
|
||||||
not survive such relicensing or conveying.
|
|
||||||
|
|
||||||
If you add terms to a covered work in accord with this section, you
|
|
||||||
must place, in the relevant source files, a statement of the
|
|
||||||
additional terms that apply to those files, or a notice indicating
|
|
||||||
where to find the applicable terms.
|
|
||||||
|
|
||||||
Additional terms, permissive or non-permissive, may be stated in the
|
|
||||||
form of a separately written license, or stated as exceptions;
|
|
||||||
the above requirements apply either way.
|
|
||||||
|
|
||||||
8. Termination.
|
|
||||||
|
|
||||||
You may not propagate or modify a covered work except as expressly
|
|
||||||
provided under this License. Any attempt otherwise to propagate or
|
|
||||||
modify it is void, and will automatically terminate your rights under
|
|
||||||
this License (including any patent licenses granted under the third
|
|
||||||
paragraph of section 11).
|
|
||||||
|
|
||||||
However, if you cease all violation of this License, then your
|
|
||||||
license from a particular copyright holder is reinstated (a)
|
|
||||||
provisionally, unless and until the copyright holder explicitly and
|
|
||||||
finally terminates your license, and (b) permanently, if the copyright
|
|
||||||
holder fails to notify you of the violation by some reasonable means
|
|
||||||
prior to 60 days after the cessation.
|
|
||||||
|
|
||||||
Moreover, your license from a particular copyright holder is
|
|
||||||
reinstated permanently if the copyright holder notifies you of the
|
|
||||||
violation by some reasonable means, this is the first time you have
|
|
||||||
received notice of violation of this License (for any work) from that
|
|
||||||
copyright holder, and you cure the violation prior to 30 days after
|
|
||||||
your receipt of the notice.
|
|
||||||
|
|
||||||
Termination of your rights under this section does not terminate the
|
|
||||||
licenses of parties who have received copies or rights from you under
|
|
||||||
this License. If your rights have been terminated and not permanently
|
|
||||||
reinstated, you do not qualify to receive new licenses for the same
|
|
||||||
material under section 10.
|
|
||||||
|
|
||||||
9. Acceptance Not Required for Having Copies.
|
|
||||||
|
|
||||||
You are not required to accept this License in order to receive or
|
|
||||||
run a copy of the Program. Ancillary propagation of a covered work
|
|
||||||
occurring solely as a consequence of using peer-to-peer transmission
|
|
||||||
to receive a copy likewise does not require acceptance. However,
|
|
||||||
nothing other than this License grants you permission to propagate or
|
|
||||||
modify any covered work. These actions infringe copyright if you do
|
|
||||||
not accept this License. Therefore, by modifying or propagating a
|
|
||||||
covered work, you indicate your acceptance of this License to do so.
|
|
||||||
|
|
||||||
10. Automatic Licensing of Downstream Recipients.
|
|
||||||
|
|
||||||
Each time you convey a covered work, the recipient automatically
|
|
||||||
receives a license from the original licensors, to run, modify and
|
|
||||||
propagate that work, subject to this License. You are not responsible
|
|
||||||
for enforcing compliance by third parties with this License.
|
|
||||||
|
|
||||||
An "entity transaction" is a transaction transferring control of an
|
|
||||||
organization, or substantially all assets of one, or subdividing an
|
|
||||||
organization, or merging organizations. If propagation of a covered
|
|
||||||
work results from an entity transaction, each party to that
|
|
||||||
transaction who receives a copy of the work also receives whatever
|
|
||||||
licenses to the work the party's predecessor in interest had or could
|
|
||||||
give under the previous paragraph, plus a right to possession of the
|
|
||||||
Corresponding Source of the work from the predecessor in interest, if
|
|
||||||
the predecessor has it or can get it with reasonable efforts.
|
|
||||||
|
|
||||||
You may not impose any further restrictions on the exercise of the
|
|
||||||
rights granted or affirmed under this License. For example, you may
|
|
||||||
not impose a license fee, royalty, or other charge for exercise of
|
|
||||||
rights granted under this License, and you may not initiate litigation
|
|
||||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
|
||||||
any patent claim is infringed by making, using, selling, offering for
|
|
||||||
sale, or importing the Program or any portion of it.
|
|
||||||
|
|
||||||
11. Patents.
|
|
||||||
|
|
||||||
A "contributor" is a copyright holder who authorizes use under this
|
|
||||||
License of the Program or a work on which the Program is based. The
|
|
||||||
work thus licensed is called the contributor's "contributor version".
|
|
||||||
|
|
||||||
A contributor's "essential patent claims" are all patent claims
|
|
||||||
owned or controlled by the contributor, whether already acquired or
|
|
||||||
hereafter acquired, that would be infringed by some manner, permitted
|
|
||||||
by this License, of making, using, or selling its contributor version,
|
|
||||||
but do not include claims that would be infringed only as a
|
|
||||||
consequence of further modification of the contributor version. For
|
|
||||||
purposes of this definition, "control" includes the right to grant
|
|
||||||
patent sublicenses in a manner consistent with the requirements of
|
|
||||||
this License.
|
|
||||||
|
|
||||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
|
||||||
patent license under the contributor's essential patent claims, to
|
|
||||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
|
||||||
propagate the contents of its contributor version.
|
|
||||||
|
|
||||||
In the following three paragraphs, a "patent license" is any express
|
|
||||||
agreement or commitment, however denominated, not to enforce a patent
|
|
||||||
(such as an express permission to practice a patent or covenant not to
|
|
||||||
sue for patent infringement). To "grant" such a patent license to a
|
|
||||||
party means to make such an agreement or commitment not to enforce a
|
|
||||||
patent against the party.
|
|
||||||
|
|
||||||
If you convey a covered work, knowingly relying on a patent license,
|
|
||||||
and the Corresponding Source of the work is not available for anyone
|
|
||||||
to copy, free of charge and under the terms of this License, through a
|
|
||||||
publicly available network server or other readily accessible means,
|
|
||||||
then you must either (1) cause the Corresponding Source to be so
|
|
||||||
available, or (2) arrange to deprive yourself of the benefit of the
|
|
||||||
patent license for this particular work, or (3) arrange, in a manner
|
|
||||||
consistent with the requirements of this License, to extend the patent
|
|
||||||
license to downstream recipients. "Knowingly relying" means you have
|
|
||||||
actual knowledge that, but for the patent license, your conveying the
|
|
||||||
covered work in a country, or your recipient's use of the covered work
|
|
||||||
in a country, would infringe one or more identifiable patents in that
|
|
||||||
country that you have reason to believe are valid.
|
|
||||||
|
|
||||||
If, pursuant to or in connection with a single transaction or
|
|
||||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
|
||||||
covered work, and grant a patent license to some of the parties
|
|
||||||
receiving the covered work authorizing them to use, propagate, modify
|
|
||||||
or convey a specific copy of the covered work, then the patent license
|
|
||||||
you grant is automatically extended to all recipients of the covered
|
|
||||||
work and works based on it.
|
|
||||||
|
|
||||||
A patent license is "discriminatory" if it does not include within
|
|
||||||
the scope of its coverage, prohibits the exercise of, or is
|
|
||||||
conditioned on the non-exercise of one or more of the rights that are
|
|
||||||
specifically granted under this License. You may not convey a covered
|
|
||||||
work if you are a party to an arrangement with a third party that is
|
|
||||||
in the business of distributing software, under which you make payment
|
|
||||||
to the third party based on the extent of your activity of conveying
|
|
||||||
the work, and under which the third party grants, to any of the
|
|
||||||
parties who would receive the covered work from you, a discriminatory
|
|
||||||
patent license (a) in connection with copies of the covered work
|
|
||||||
conveyed by you (or copies made from those copies), or (b) primarily
|
|
||||||
for and in connection with specific products or compilations that
|
|
||||||
contain the covered work, unless you entered into that arrangement,
|
|
||||||
or that patent license was granted, prior to 28 March 2007.
|
|
||||||
|
|
||||||
Nothing in this License shall be construed as excluding or limiting
|
|
||||||
any implied license or other defenses to infringement that may
|
|
||||||
otherwise be available to you under applicable patent law.
|
|
||||||
|
|
||||||
12. No Surrender of Others' Freedom.
|
|
||||||
|
|
||||||
If conditions are imposed on you (whether by court order, agreement or
|
|
||||||
otherwise) that contradict the conditions of this License, they do not
|
|
||||||
excuse you from the conditions of this License. If you cannot convey a
|
|
||||||
covered work so as to satisfy simultaneously your obligations under this
|
|
||||||
License and any other pertinent obligations, then as a consequence you may
|
|
||||||
not convey it at all. For example, if you agree to terms that obligate you
|
|
||||||
to collect a royalty for further conveying from those to whom you convey
|
|
||||||
the Program, the only way you could satisfy both those terms and this
|
|
||||||
License would be to refrain entirely from conveying the Program.
|
|
||||||
|
|
||||||
13. Use with the GNU Affero General Public License.
|
|
||||||
|
|
||||||
Notwithstanding any other provision of this License, you have
|
|
||||||
permission to link or combine any covered work with a work licensed
|
|
||||||
under version 3 of the GNU Affero General Public License into a single
|
|
||||||
combined work, and to convey the resulting work. The terms of this
|
|
||||||
License will continue to apply to the part which is the covered work,
|
|
||||||
but the special requirements of the GNU Affero General Public License,
|
|
||||||
section 13, concerning interaction through a network will apply to the
|
|
||||||
combination as such.
|
|
||||||
|
|
||||||
14. Revised Versions of this License.
|
|
||||||
|
|
||||||
The Free Software Foundation may publish revised and/or new versions of
|
|
||||||
the GNU General Public License from time to time. Such new versions will
|
|
||||||
be similar in spirit to the present version, but may differ in detail to
|
|
||||||
address new problems or concerns.
|
|
||||||
|
|
||||||
Each version is given a distinguishing version number. If the
|
|
||||||
Program specifies that a certain numbered version of the GNU General
|
|
||||||
Public License "or any later version" applies to it, you have the
|
|
||||||
option of following the terms and conditions either of that numbered
|
|
||||||
version or of any later version published by the Free Software
|
|
||||||
Foundation. If the Program does not specify a version number of the
|
|
||||||
GNU General Public License, you may choose any version ever published
|
|
||||||
by the Free Software Foundation.
|
|
||||||
|
|
||||||
If the Program specifies that a proxy can decide which future
|
|
||||||
versions of the GNU General Public License can be used, that proxy's
|
|
||||||
public statement of acceptance of a version permanently authorizes you
|
|
||||||
to choose that version for the Program.
|
|
||||||
|
|
||||||
Later license versions may give you additional or different
|
|
||||||
permissions. However, no additional obligations are imposed on any
|
|
||||||
author or copyright holder as a result of your choosing to follow a
|
|
||||||
later version.
|
|
||||||
|
|
||||||
15. Disclaimer of Warranty.
|
|
||||||
|
|
||||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
|
||||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
|
||||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
|
||||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
|
||||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
||||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
|
||||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
|
||||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
|
||||||
|
|
||||||
16. Limitation of Liability.
|
|
||||||
|
|
||||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
|
||||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
|
||||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
|
||||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
|
||||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
|
||||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
|
||||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
|
||||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
|
||||||
SUCH DAMAGES.
|
|
||||||
|
|
||||||
17. Interpretation of Sections 15 and 16.
|
|
||||||
|
|
||||||
If the disclaimer of warranty and limitation of liability provided
|
|
||||||
above cannot be given local legal effect according to their terms,
|
|
||||||
reviewing courts shall apply local law that most closely approximates
|
|
||||||
an absolute waiver of all civil liability in connection with the
|
|
||||||
Program, unless a warranty or assumption of liability accompanies a
|
|
||||||
copy of the Program in return for a fee.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
How to Apply These Terms to Your New Programs
|
|
||||||
|
|
||||||
If you develop a new program, and you want it to be of the greatest
|
|
||||||
possible use to the public, the best way to achieve this is to make it
|
|
||||||
free software which everyone can redistribute and change under these terms.
|
|
||||||
|
|
||||||
To do so, attach the following notices to the program. It is safest
|
|
||||||
to attach them to the start of each source file to most effectively
|
|
||||||
state the exclusion of warranty; and each file should have at least
|
|
||||||
the "copyright" line and a pointer to where the full notice is found.
|
|
||||||
|
|
||||||
<one line to give the program's name and a brief idea of what it does.>
|
|
||||||
Copyright (C) <year> <name of author>
|
|
||||||
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
|
||||||
it under the terms of the GNU General Public License as published by
|
|
||||||
the Free Software Foundation, either version 3 of the License, or
|
|
||||||
(at your option) any later version.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
Also add information on how to contact you by electronic and paper mail.
|
|
||||||
|
|
||||||
If the program does terminal interaction, make it output a short
|
|
||||||
notice like this when it starts in an interactive mode:
|
|
||||||
|
|
||||||
<program> Copyright (C) <year> <name of author>
|
|
||||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
|
||||||
This is free software, and you are welcome to redistribute it
|
|
||||||
under certain conditions; type `show c' for details.
|
|
||||||
|
|
||||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
|
||||||
parts of the General Public License. Of course, your program's commands
|
|
||||||
might be different; for a GUI interface, you would use an "about box".
|
|
||||||
|
|
||||||
You should also get your employer (if you work as a programmer) or school,
|
|
||||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
|
||||||
For more information on this, and how to apply and follow the GNU GPL, see
|
|
||||||
<http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
The GNU General Public License does not permit incorporating your program
|
|
||||||
into proprietary programs. If your program is a subroutine library, you
|
|
||||||
may consider it more useful to permit linking proprietary applications with
|
|
||||||
the library. If this is what you want to do, use the GNU Lesser General
|
|
||||||
Public License instead of this License. But first, please read
|
|
||||||
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
|
12
docs/CREDITS
12
docs/CREDITS
@ -1,12 +0,0 @@
|
|||||||
iPXE:
|
|
||||||
Thanks to "eworm" for his work on the AUR iPXE-git package:
|
|
||||||
https://aur.archlinux.org/packages/ipxe-git/
|
|
||||||
|
|
||||||
and specifically the following patches:
|
|
||||||
http://www.eworm.de/download/linux/ipxe-0001-git-version.patch
|
|
||||||
http://www.eworm.de/download/linux/ipxe-0002-banner.patch
|
|
||||||
http://www.eworm.de/download/linux/ipxe-0003-iso-efi.patch
|
|
||||||
http://www.eworm.de/download/linux/ipxe-0004-fix-no-pie-workaround.patch
|
|
||||||
|
|
||||||
|
|
||||||
thanks to jthan, even though he drives me batty sometimes.
|
|
33
docs/FAQ
33
docs/FAQ
@ -1,33 +0,0 @@
|
|||||||
BDisk Frequently Asked(/Unasked) Questions
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
0.) Why does it take so long to build?
|
|
||||||
1.) Why is the generated ISO file so big?
|
|
||||||
2.) How do I find the version/release/etc. number of an ISO?
|
|
||||||
|
|
||||||
|
|
||||||
=========================================================
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
0.) WHY DOES IT TAKE SO LONG TO BUILD?
|
|
||||||
A: This typically occurs when you're building from within a LiveCD/LiveUSB situation, in a VM/container/etc., or on a headless server.
|
|
||||||
If this is the case, you may run into what appears to be "stalling", especially while keys are generating for the chroots.
|
|
||||||
Thankfully, there is an easy fix. You can install the "haveged"(http://www.issihosts.com/haveged/) software and run it. This will
|
|
||||||
show an immediate and non-negligible improvement for the above contexts. If you have extra power to throw at it (or are using a dedicated build box)
|
|
||||||
as well, I recommend enabling I_AM_A_RACECAR in your build.conf. BDisk will then be more aggressive with its resource consumption.
|
|
||||||
|
|
||||||
|
|
||||||
1.) WHY IS THE GENERATED ISO FILE SO BIG?
|
|
||||||
A: You may have enabled a LOT of packages in extra/packages.(32|64|both). Or you're using the default set of packages, which tries to include a LOT
|
|
||||||
of different (and in some cases, redundant) packages for widespread utilization and usage. In addition, keep in mind that BDisk builds a single ISO
|
|
||||||
that can be used on both i686 architectures AND full x86_64 architectures ("AMD64" as you may sometimes see it referenced). Because it doesn't cheat
|
|
||||||
and just use a 64-bit kernel with a 32-bit userland, it needs two different squash images on each ISO- one for 32-bit userland and one for 64-bit
|
|
||||||
userland.
|
|
||||||
|
|
||||||
2.) HOW DO I FIND THE VERSION/RELEASE/ETC. NUMBER OF AN ISO?
|
|
||||||
A: This can be found in a multitude of places. The full-size ISO file (iso/<distname>-<git tag>-<git rev number>-(32|64|any).iso) should have the
|
|
||||||
version right in the file name. If you want more detailed information (or perhaps you renamed the file), you can mount the ISO as loopback in GNU/Linux,
|
|
||||||
*BSD, or Mac OS X and check /path/to/mounted/iso/VERSION_INFO.txt. Lastly, within the runtime itself (especially handy if booting via iPXE), you can
|
|
||||||
check /root/VERSION_INFO.txt within the running live environment.
|
|
@ -1 +0,0 @@
|
|||||||
COPYING
|
|
@ -1,8 +0,0 @@
|
|||||||
Please see the full documentation at https://bdisk.square-r00t.net
|
|
||||||
|
|
||||||
Alternatively, you can compile the manual yourself (requires asciidoc and asciidoctor):
|
|
||||||
|
|
||||||
cd docs/manual
|
|
||||||
asciidoctor -o /tmp/README.html HEAD.adoc
|
|
||||||
|
|
||||||
Then point your browser to /tmp/README.html
|
|
@ -1 +0,0 @@
|
|||||||
CREDITS
|
|
60
docs/TODO
60
docs/TODO
@ -1,60 +0,0 @@
|
|||||||
-investigate weird signing issue- if you specify a key to sign with, it appears that the squashed images (at least in the http dir) doesn't have a sig/asc. do they need to copy it over? or is it not even signing them?
|
|
||||||
-switch from python-pygpgme to python-gpgme for better performance. also clean up bGPG in general; reference KANT.
|
|
||||||
-more pythonic! classes (because inits help), use list or tuple constant for checksums, try vars-ing the configparser stuff (and move defaults to in-code?),
|
|
||||||
change path combinations to use os.path.join etc.
|
|
||||||
-modularity: https://stackoverflow.com/a/8719100
|
|
||||||
|
|
||||||
-mtree-like functionality; if mtree spec is found, apply that to files in overlay (or chroot even); otherwise copy from overlay and don't touch chroot
|
|
||||||
|
|
||||||
-i_am_a_racecar optimizations
|
|
||||||
- different distro guests (debian, etc.)- https://stackoverflow.com/questions/2349991/python-how-to-import-other-python-files/20749411#20749411
|
|
||||||
-incorporate this into the manual?
|
|
||||||
-TFTP configs generated for pxelinux
|
|
||||||
-fix the branding, etc. on ipxe. :(
|
|
||||||
-add ipxe to full iso maybe?
|
|
||||||
-include benchmarking
|
|
||||||
-- http://sourceforge.net/projects/unixbench/
|
|
||||||
-- https://code.google.com/p/byte-unixbench/
|
|
||||||
-- https://github.com/akopytov/sysbench
|
|
||||||
-- (http://blog.due.io/2014/linode-digitalocean-and-vultr-comparison/ etc.)
|
|
||||||
-There *has* to be a better way of handling package installation in the chroots.
|
|
||||||
--implement pyalpm to decrease dependency on chroot pacman-ing?
|
|
||||||
--or even maybe https://wiki.archlinux.org/index.php/offline_installation_of_packages in pure python!
|
|
||||||
-set up automatic exporting to PDF of the user manual server-side. https://pypi.python.org/pypi/unoconv/0.6
|
|
||||||
-maybe remove lxde, firefox, chrome and replace with enlightenment/midori?
|
|
||||||
-custom repo? https://brainwreckedtech.wordpress.com/2013/01/27/making-your-own-arch-linux-repository/
|
|
||||||
--https://wiki.archlinux.org/index.php/Building_32-bit_packages_on_a_64-bit_system # NOTE: arch has dropped i686, now continued as archlinux32
|
|
||||||
-implement better "additional" packages list. specify for path in build.ini- these should be more easily changed by end users. DON'T TOUCH iso.pkgs.lst since those are necessary for booting.
|
|
||||||
-automatic shorewall/some other firewall?
|
|
||||||
-autodetection/configuration of network. DHCP is currently running by default, but does it need to support IPv6? if so, how would the user configure their network?
|
|
||||||
-DISABLE NETWORKMANAGER AND "fi.w1.wpa_supplicant1"??? keeps spawning wpa_supplicant (and thusly killing networking proper)
|
|
||||||
-for netboot, custom user agent (should be defined by build.ini)
|
|
||||||
--iPXE's curl
|
|
||||||
--initrd's curl
|
|
||||||
-WISH: Better logging/debugging
|
|
||||||
https://web.archive.org/web/20170726052946/http://www.lexev.org/en/2013/python-logging-every-day/
|
|
||||||
|
|
||||||
-WISH: signing for secureboot releases (PreLoader and loader.efi handle this okay, but require manual intervention)
|
|
||||||
-does loader.efi support splash backgrounds? can i implement that differently somehow?
|
|
||||||
--yes, see e.g. https://www.reddit.com/r/archlinux/comments/3bwgf0/where_put_the_splasharchbmp_to_splash_screen_boot/
|
|
||||||
-strip out/remove unnecessary and orphan packages (e.g. gcc, make, automake, etc.) before building ISO
|
|
||||||
-incorporate iPXE tweaks:
|
|
||||||
--http://ipxe.org/crypto
|
|
||||||
--http://ipxe.org/cmd/imgtrust
|
|
||||||
--http://ipxe.org/cmd/imgverify
|
|
||||||
--enable use of custom CA/self-signed certs for HTTPS etc.
|
|
||||||
--signed kernel and initrd for ipxe:
|
|
||||||
---#imgtrust --permanent
|
|
||||||
---#imgverify vmlinuz path/to/vmlinuz.sig
|
|
||||||
---#imgverify initrd path/to/initrd.sig
|
|
||||||
---DONE, partially. need to incorporate codesign certs/keys. routines, conf variables
|
|
||||||
-enable mirror= kernel commandline.
|
|
||||||
-NOTE: Following should be implemented via AIF-NG (https://git.square-r00t.net/AIF-NG, work pending for fix to BDisk for i686/x86_64 split)
|
|
||||||
--if mirror_(NAME) is present, use that as repo name.
|
|
||||||
--if it starts with /, treat as mirrorlist (Include); otherwise use Server =
|
|
||||||
--if it has mirror_SIG-X, set signature options e.g. _SIG-N would be "SigLevel = Never"
|
|
||||||
-iPXE background support. sed -rf "${BASEDIR}/src/ipxe_local/script.sed" ${SRCDIR}/ipxe/src/config/general.h ; sed -rf "${BASEDIR}/src/ipxe_local/script2.sed" ${SRCDIR}/ipxe/src/config/console.h
|
|
||||||
--note that iPXE VESAFB console is not (yet) supported in EFI, so this is on hold. check into this to see if it has changed.
|
|
||||||
-include WinMTR, build Mac OS X MTR for dist/tools on CD
|
|
||||||
-include pre-compiled LibreCrypt for opening LUKS parts on Windows (https://github.com/t-d-k/LibreCrypt)
|
|
||||||
--curl -s https://raw.githubusercontent.com/t-d-k/LibreCrypt/master/README.md | egrep 'InstallLibreCrypt_v[A-Za-z0-9\.]*.exe' | cut -f2 -d'"'
|
|
@ -1,6 +0,0 @@
|
|||||||
include::USER.adoc[]
|
|
||||||
include::DEV.adoc[]
|
|
||||||
include::BOOT.adoc[]
|
|
||||||
include::FURTHER.adoc[]
|
|
||||||
include::FAQ.adoc[]
|
|
||||||
include::FOOT.adoc[]
|
|
@ -1,8 +0,0 @@
|
|||||||
= Netboot
|
|
||||||
[partintro]
|
|
||||||
.What good is software if you can't see it in action?
|
|
||||||
--
|
|
||||||
It's possible to netboot my personal build of BDisk. I mostly keep this up for emergencies in case I need it, but it's good to show you that yes, you can boot a 2GB+ squashed and compressed filesystem from a <50MB image file.
|
|
||||||
--
|
|
||||||
|
|
||||||
include::netboot/HOWTO.adoc[]
|
|
@ -1,8 +0,0 @@
|
|||||||
= Developer Manual
|
|
||||||
[partintro]
|
|
||||||
.What good is software if nobody changes it?
|
|
||||||
--
|
|
||||||
BDisk can be sourced for other projects, as it's written in a modular manner. Future versions may support installation as a normal Python module. This will also provide information you may need to change parts of BDisk -- it *is* opensource, after all!
|
|
||||||
--
|
|
||||||
|
|
||||||
include::dev/FUNCTIONS.adoc[]
|
|
@ -1,8 +0,0 @@
|
|||||||
= FAQ
|
|
||||||
[partintro]
|
|
||||||
.What good is software if nobody understands it?
|
|
||||||
--
|
|
||||||
Here you will find some answers to Frequently Asked Questions I've received about this project. Please be sure to check this list before <<FURTHER.adoc#_bug_reports_feature_requests, opening a bug report>> or sending a patch!
|
|
||||||
--
|
|
||||||
|
|
||||||
include::faq/INDEX.adoc[]
|
|
@ -1,8 +0,0 @@
|
|||||||
//[appendix]
|
|
||||||
//= User Manual
|
|
||||||
//[appendix]
|
|
||||||
//= Developer Manual
|
|
||||||
//[appendix]
|
|
||||||
//= Netboot
|
|
||||||
//[appendix]
|
|
||||||
//= Bug Reports/Feature Requests
|
|
@ -1,11 +0,0 @@
|
|||||||
= Further Reading/Resources
|
|
||||||
[partintro]
|
|
||||||
.What good is software if you can't interact?
|
|
||||||
--
|
|
||||||
Here you will find further info, other resources, and such relating to BDisk.
|
|
||||||
--
|
|
||||||
|
|
||||||
include::further/PASSWORDS.adoc[]
|
|
||||||
include::further/BUGS.adoc[]
|
|
||||||
include::further/CONTACT.adoc[]
|
|
||||||
|
|
@ -1,48 +0,0 @@
|
|||||||
= BDisk User and Developer Manual
|
|
||||||
Brent Saner <bts@square-r00t.net>
|
|
||||||
v1.3, 2017-08-20
|
|
||||||
:doctype: book
|
|
||||||
:data-uri:
|
|
||||||
:imagesdir: images
|
|
||||||
:sectlinks:
|
|
||||||
:toc: preamble
|
|
||||||
:toc2: left
|
|
||||||
:idprefix:
|
|
||||||
:sectnums:
|
|
||||||
:toclevels: 2
|
|
||||||
// So there's currently a "bug" in that the TOC will display with continued numbering across parts.
|
|
||||||
// i essentially want the opposite of https://github.com/asciidoctor/asciidoctor/issues/979 TODO
|
|
||||||
|
|
||||||
[dedication]
|
|
||||||
= Thanks
|
|
||||||
See CREDITS in the project source for a list of thanks.
|
|
||||||
|
|
||||||
|
|
||||||
[preface]
|
|
||||||
= Preface
|
|
||||||
=== About the Author
|
|
||||||
I am a GNU/Linux Systems/Network Administrator/Engineer- I wear a lot of hats. I have a lot of side projects to keep me busy when I’m not working at _${dayjob}_, mostly to assist in other side projects and become more efficient and proficient at those tasks. “Shaving the yak,” footnote:[See http://catb.org/jargon/html/Y/yak-shaving.html] indeed.
|
|
||||||
|
|
||||||
A lot of research went into how low-level boot operations take place when writing BDisk, both in BIOS and UEFI footnote:[*Unified Extensible Firmware Interface.* UEFI is not BIOS, and BIOS is not UEFI.] (and corresponding concepts such as Secureboot, etc.) which is no easy task to understand and very commonly misunderstood. (For instance, a common misconception is that UEFI necessarily implies Secureboot. This is quite far from the truth and UEFI by itself is quite a useful replacement for BIOS). I invite you to do research into the specifications yourself; it's rather fascinating.
|
|
||||||
|
|
||||||
|
|
||||||
=== What is BDisk?
|
|
||||||
BDisk refers to both a live distribution I use in my own uses (for rescue situations, recovery, etc.) but foremost and most importantly, it also refers to the tool I use for building that distribution. In other words, it's both a complete GNU/Linux distribution you can run entirely from USB/CD/DVD/etc. (without needing to install it to your hard drive)... and also the name of a tool to create a custom GNU/Linux install. The latter is what this project and documentation refer to when the word “BDisk” is used.
|
|
||||||
|
|
||||||
This documentation was started when I rewrote BDisk in Python 3.x; versions 0.x-2.x of BDisk were written in Bash, and horribly inelegant and rigid. footnote:[I should take the time to note that I am still quite new to Python so expect there to be plenty of optimizations to be made and general WTF-ery from seasoned Python developers. If you encounter any bugs or improvements, please <<FURTHER.adoc#_bug_reports_feature_requests,report them>>! It'd be much appreciated.] One of my main goals was to make BDisk as easy to use as possible. This is surprisingly hard to do- it’s quite challenging to try to approach software you’ve written with the mindset of someone other than you.
|
|
||||||
|
|
||||||
It’s my hope that by releasing this utility (and documenting it), you can use it and save some time for yourself as well (and hopefully get the chance to learn a bit more in the process!).
|
|
||||||
|
|
||||||
It of course is not the <<i_don_t_like_bdisk_are_there_any_other_alternatives,only live media creator>> out there, but most others only focus on remastering an existing ISO, or creating an installer ISO -- not creating a custom live-centric environment.
|
|
||||||
|
|
||||||
|
|
||||||
=== Copyright/Licensing
|
|
||||||
The BDisk code is https://www.gnu.org/licenses/gpl-3.0.en.html[GPLv3-licensed^]. This means that you can use it for business reasons, personal reasons, modify it, etc. Please be sure to familiarize yourself with the full set of terms. You can find the full license in `docs/LICENSE`.
|
|
||||||
|
|
||||||
image::https://www.gnu.org/graphics/gplv3-127x51.png[GPLv3,align="center"]
|
|
||||||
|
|
||||||
This document, and all other associated author-generated documentation, are released under the http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons CC-BY-SA 4.0^] copyright. It's essentially the GPL for non-software, so similar terms apply.
|
|
||||||
|
|
||||||
image::https://i.creativecommons.org/l/by-sa/4.0/88x31.png[CC-BY-SA_4.0,align="center"]
|
|
||||||
|
|
||||||
include::BODY.adoc[]
|
|
@ -1,26 +0,0 @@
|
|||||||
= User Manual
|
|
||||||
|
|
||||||
[partintro]
|
|
||||||
.What good is software if nobody uses it?
|
|
||||||
--
|
|
||||||
BDisk was ultimately designed to make your life easier. "Why would I possibly need yet another LiveCD/LiveUSB?" Well, that's sort of the point- by customizing a live distribution of GNU/Linux to _your_ particular needs/desires/whimsy, you can do away with the multiple other images you keep around. It's designed to let you create a fully customized distribution.
|
|
||||||
|
|
||||||
Using BDisk, you can:
|
|
||||||
|
|
||||||
* Install GNU/Linux (https://wiki.archlinux.org/index.php/installation_guide[Arch^], https://watchmysys.com/blog/2015/02/installing-centos-7-with-a-chroot/[CentOS^], https://www.debian.org/releases/stable/amd64/apds03.html.en[Debian^], https://wiki.gentoo.org/wiki/Handbook:AMD64#Installing_Gentoo[Gentoo^], https://help.ubuntu.com/lts/installation-guide/powerpc/apds04.html[Ubuntu^]...). BDisk may be Arch-based, but many if not most other distros offer ways to install from any GNU/Linux live distribution.
|
|
||||||
* Perform disk maintenance (https://raid.wiki.kernel.org/index.php/RAID_setup[mdadm^], fdisk / http://www.rodsbooks.com/gdisk/[gdisk^], http://gparted.org/[gparted^], https://www.thomas-krenn.com/en/wiki/StorCLI[storcli^], etc.). Need to replace that disk in your RAID and you don't have hotswap? Not a problem!
|
|
||||||
* Rescue, recover, wipe (http://www.sleuthkit.org/sleuthkit/[scalpel^], http://www.andybev.com/index.php/Nwipe[nwipe^], http://foremost.sourceforge.net/[foremost^], etc.). Chances are this is why you booted a live distro in the first place, yes?
|
|
||||||
* Boot over the Internet (or LAN). Burning a new image to CD/DVD/USB is a pain. BDisk has built-in support for http://ipxe.org/[iPXE^] (and traditional PXE setups). Update the filesystem image once, deploy it everywhere.
|
|
||||||
* And much, much more.
|
|
||||||
** Seriously.
|
|
||||||
|
|
||||||
This manual will give you the information you need to build your very own live GNU/Linux distribution.
|
|
||||||
--
|
|
||||||
|
|
||||||
include::user/GETTING_STARTED.adoc[]
|
|
||||||
include::user/IMPORTANT_CONCEPTS.adoc[]
|
|
||||||
include::user/PROJECT_LAYOUT.adoc[]
|
|
||||||
include::user/BUILDINI.adoc[]
|
|
||||||
include::user/ADVANCED.adoc[]
|
|
||||||
include::user/BUILDING.adoc[]
|
|
||||||
|
|
@ -1,24 +0,0 @@
|
|||||||
== Layout of BDisk functions
|
|
||||||
These functions exist in <<_bdisk_,`bdisk/`>>.
|
|
||||||
|
|
||||||
include::functions/BCHROOT.adoc[]
|
|
||||||
|
|
||||||
=== `bdisk.py`
|
|
||||||
This file is a sort of "wrapper" -- it pulls all the other files in this directory together into a single usable Python script. In other words, to build a BDisk distribution, you would simply run `bdisk/bdisk.py` -- that's it! See <<building_a_bdisk_iso>>.
|
|
||||||
|
|
||||||
It contains no functions, it just contains minimal logic to tie all the other functions together.
|
|
||||||
|
|
||||||
include::functions/BGPG.adoc[]
|
|
||||||
|
|
||||||
include::functions/BSSL.adoc[]
|
|
||||||
|
|
||||||
include::functions/BSYNC.adoc[]
|
|
||||||
|
|
||||||
include::functions/BUILD.adoc[]
|
|
||||||
|
|
||||||
include::functions/HOST.adoc[]
|
|
||||||
|
|
||||||
include::functions/IPXE.adoc[]
|
|
||||||
|
|
||||||
include::functions/PREP.adoc[]
|
|
||||||
|
|
@ -1,29 +0,0 @@
|
|||||||
=== `bchroot.py`
|
|
||||||
This file controls creation of the chroots -- the directories in which BDisk builds the actual system that is booted into.
|
|
||||||
|
|
||||||
==== chroot(_chrootdir_, _chroot_hostname_, _cmd_ = '`/root/pre-build.sh`')
|
|
||||||
This function manages mounting the mountpoints for the chroot(s) in preparation for the images of the live media. It also runs <<changing_the_build_process,the inner chroot preparation script>>. Returns `chrootdir` (same as the parameter provided).
|
|
||||||
|
|
||||||
===== chrootdir
|
|
||||||
The directory where the filesystem tree for the chroot lies. Absolute path only.
|
|
||||||
|
|
||||||
===== chroot_hostname
|
|
||||||
The hostname to use for the guest.
|
|
||||||
|
|
||||||
NOTE: This parameter may be removed in future versions.
|
|
||||||
|
|
||||||
===== cmd
|
|
||||||
The command to run inside the chroot once all the mountpoints are set up.
|
|
||||||
|
|
||||||
==== chrootUnmount(_chrootdir_)
|
|
||||||
Unmount the mounts set up in <<chroot_em_chrootdir_em_em_chroot_hostname_em_em_cmd_em_root_pre_build_sh,chroot()>>.
|
|
||||||
|
|
||||||
===== chrootdir
|
|
||||||
See <<chrootdir>>.
|
|
||||||
|
|
||||||
==== chrootTrim(_build_)
|
|
||||||
This function performs some cleanup and optimizations to the chroot(s).
|
|
||||||
|
|
||||||
===== build
|
|
||||||
A dictionary of <<code_build_code>>'s values (with some additional keys/values added). See <<parseconfig_em_confs_em,parseConfig()>>.
|
|
||||||
|
|
@ -1,41 +0,0 @@
|
|||||||
=== `bGPG.py`
|
|
||||||
This contains functions having to do with GPG -- signing files, verifying other signatures, generating a key (if one wasn't specified), using a key (if one was specified), etc.
|
|
||||||
|
|
||||||
==== genGPG(_conf_)
|
|
||||||
This function controls generating (or "importing" an existing) GnuPG key for use with other operations. Returns `gpg`, a <<optional,PyGPGME>> object.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
A dictionary of the <<the_code_build_ini_code_file,configuration>> (with some additional keys/values added). See (TODO: link to host.py's config parser).
|
|
||||||
|
|
||||||
==== killStaleAgent(_conf_)
|
|
||||||
This function kills off any stale GnuPG agents running. Not doing so can cause some strange behaviour both during the build process and on the host.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
==== signIMG(_path_, _conf_)
|
|
||||||
This function signs a given file with the keys BDisk was either configured to use or automatically generated.
|
|
||||||
|
|
||||||
===== path
|
|
||||||
The full, absolute path to the file to be signed. An https://www.gnupg.org/gph/en/manual/r1290.html[ASCII-armored^] https://www.gnupg.org/gph/en/manual/x135.html[detached^] signature (plaintext) will be generated at `_path_.asc`, and a binary detached signature will be generated at `_path_.sig`.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
==== gpgVerify(_sigfile_, _datafile_, _conf_)
|
|
||||||
This function verifies a detached signature against a file containing data. Returns *True* if the file verifies, or *False* if not.
|
|
||||||
|
|
||||||
===== sigfile
|
|
||||||
The detached signature file. Can be ASCII-armored or binary format. Full/absolute path only.
|
|
||||||
|
|
||||||
===== datafile
|
|
||||||
The file containing the data to be verified. Full/absolute path only.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
==== delTempKeys(_conf_)
|
|
||||||
Delete automatically-generated keys (if we generated them) as well as the automatically imported verification key (<<code_gpgkey_code>>).
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
@ -1,64 +0,0 @@
|
|||||||
=== `bSSL.py`
|
|
||||||
Functions having to do with OpenSSL are stored here. This is used primarily for "mini" builds (via iPXE), they let you boot your BDisk distribution over the Internet. If an SSL key, CA certificate, etc. weren't defined and you want to build a mini image, this file contains functions that will build an SSL PKI (public key infrastructure) for you automatically.
|
|
||||||
|
|
||||||
==== verifyCert(_cert_, _key_, _CA_ = None)
|
|
||||||
This function will verify a certificate's validity/pairing with a key, optionally against a given CA certificate. Returns *True* on successful verification, or *False* and an exit (for sanity purposes).
|
|
||||||
|
|
||||||
===== cert
|
|
||||||
The certificate to be validated. Must be a PyOpenSSL certificate object.
|
|
||||||
|
|
||||||
===== key
|
|
||||||
The key to validate against <<cert>>. Must be a PyOpenSSL key object.
|
|
||||||
|
|
||||||
===== CA
|
|
||||||
The CA, or certificate authority, certificate to verify against.
|
|
||||||
|
|
||||||
NOTE: This currently does not work, as PyOpenSSL does not currently support verifying against a specified CA certificate.
|
|
||||||
|
|
||||||
==== sslCAKey(_conf_)
|
|
||||||
This function imports a CA key (<<code_ssl_cakey_code>>) into a PyOpenSSL object (or generates one if necessary). Returns a PyOpenSSL key object.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
==== sslCA(_conf_, _key_ = None)
|
|
||||||
This function imports a CA certificate (<<code_ssl_ca_code>>) into a PyOpenSSL object (or generates one if necessary). Returns a PyOpenSSL certificate object.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
===== key
|
|
||||||
A PyOpenSSL key object that should be used to generate the CA certificate (or is paired to the CA certificate if specified).
|
|
||||||
|
|
||||||
==== sslCKey(_conf_)
|
|
||||||
This function imports a client key (<<code_ssl_key_code>>) into a PyOpenSSL object (or generates one if necessary). Returns a PyOpenSSL key object.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
==== ssslCSR(_conf_, _key_ = None)
|
|
||||||
This function generates a CSR (certificate signing request).
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
===== key
|
|
||||||
A PyOpenSSL key object that should be used to generate the CSR. It should be a key that is paired to the client certificate.
|
|
||||||
|
|
||||||
==== sslSign(_conf_, _ca_, _key_, _csr_)
|
|
||||||
This function signs a CSR using a specified CA.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
===== ca
|
|
||||||
A PyOpenSSL certificate object for the CA certificate. This certificate (object) should have signing capabilities.
|
|
||||||
|
|
||||||
===== key
|
|
||||||
A PyOpenSSL key object paired to <<ca_2>>.
|
|
||||||
|
|
||||||
===== csr
|
|
||||||
A PyOpenSSL CSR object. See <<ssslcsr_em_conf_em_em_key_em_none,sslCSR()>>.
|
|
||||||
|
|
||||||
==== sslPKI(_conf_)
|
|
||||||
Ties all the above together into one convenient function. Returns a PyOpenSSL certificate object of the signed client certificate.
|
|
@ -1,26 +0,0 @@
|
|||||||
=== `bsync.py`
|
|
||||||
This file has functions relating to copying your BDisk build to various resources. For instance, if you want your ISO available to download then this file would be used to copy your finished build to an HTTP server/root you specify.
|
|
||||||
|
|
||||||
==== http(_conf_)
|
|
||||||
This function prepares a *local* HTTP directory, or webroot. See <<code_http_code_2>>.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
==== tftp(_conf_)
|
|
||||||
This function prepares a *local* TFTP directory (for traditional PXE). See <<code_tftp_code_2>>.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
==== git(_conf_)
|
|
||||||
This function commits (and pushes) any changes you might have made to your project (<<code_basedir_code>>) automatically.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
==== rsync(_conf_)
|
|
||||||
This function syncs your builds, HTTP directory (if enabled), TFTP directory (if enabled), etc. to a remote host. See <<code_rsync_code_2>>.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
@ -1,43 +0,0 @@
|
|||||||
=== `build.py`
|
|
||||||
This is responsible for building the "full" ISO, building UEFI support, etc.
|
|
||||||
|
|
||||||
==== genImg(_conf_)
|
|
||||||
This function builds the http://tldp.org/HOWTO/SquashFS-HOWTO/creatingandusing.html[squashed filesystem^] images and, <<code_gpg_code,if requested>>, signs them.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
==== genUEFI(_build_, _bdisk_)
|
|
||||||
This function builds UEFI support for the ISO files. Returns the path of an embedded EFI bootable binary/ESP image.
|
|
||||||
|
|
||||||
===== build
|
|
||||||
The <<code_build_code,build section>> of the configuration.
|
|
||||||
|
|
||||||
===== bdisk
|
|
||||||
The <<code_bdisk_code,bdisk section>> of the configuration.
|
|
||||||
|
|
||||||
==== genISO(_conf_)
|
|
||||||
Builds the full ISO image(s). Returns a dictionary of information about the built ISO file (see <<iso>>).
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
==== displayStats(_iso_)
|
|
||||||
Parses the output of e.g. <<geniso_em_conf_em,genISO()>> and displays in a summary useful to the end-user.
|
|
||||||
|
|
||||||
===== iso
|
|
||||||
A dictionary of information about the ISO file. This is typically:
|
|
||||||
|
|
||||||
{'iso':
|
|
||||||
{'name':<'Main' for the full ISO, 'Mini' for the mini ISO, etc.>},
|
|
||||||
{<name>:
|
|
||||||
'sha':<SHA256 sum of ISO file>,
|
|
||||||
'file':<full/absolute path to ISO file>,
|
|
||||||
'size':<size, in "humanized" format (e.g. #GB, #MB, etc.)>,
|
|
||||||
'type':<Full or Mini>,
|
|
||||||
'fmt':<human readable ISO type. e.g. Hybrid for an image that can be burned directly to a disk via dd or burned to optical media>
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
==== cleanUp()
|
|
||||||
Currently a no-op; this function is reserved for future usage to cleanup the build process automatically.
|
|
@ -1,42 +0,0 @@
|
|||||||
=== `host.py`
|
|
||||||
These functions are used to perform "meta" tasks such as get information about the build host, find <<the_code_build_ini_code_file,the `build.ini` file>>, and parse your configuration options.
|
|
||||||
|
|
||||||
==== getOS()
|
|
||||||
Returns the distribution of the build host.
|
|
||||||
|
|
||||||
==== getBits()
|
|
||||||
Returns the "bitness" of the build host (e.g. `32bit` or `64bit`).
|
|
||||||
|
|
||||||
==== getHostname()
|
|
||||||
Returns the hostname of the build host.
|
|
||||||
|
|
||||||
==== getConfig(_conf_file_ = '/etc/bdisk/build.ini')
|
|
||||||
Returns a list of:
|
|
||||||
|
|
||||||
. the default configuration file
|
|
||||||
. the user-specified configuration file
|
|
||||||
|
|
||||||
===== conf_file
|
|
||||||
This is a full/absolute path that is searched first. If it exists and is a file, it is assumed to be the "canonical" <<the_code_build_ini_code_file,`build.ini` file>>.
|
|
||||||
|
|
||||||
==== parseConfig(_confs_)
|
|
||||||
This function parses the configuration file(s) and returns a list of:
|
|
||||||
|
|
||||||
. A ConfigParser object
|
|
||||||
. The configuration as a dictionary
|
|
||||||
|
|
||||||
It performs some additional things, such as:
|
|
||||||
|
|
||||||
* Converts "boolean" operations to true Python booleans
|
|
||||||
* Tries to automatically detect the version if one isn't provided
|
|
||||||
* Establishes the build number (this is a number that should be local to the build host)
|
|
||||||
* Forms a list of the <<code_multiarch_code,architectures>> to build
|
|
||||||
* Validates:
|
|
||||||
** The bootstrap tarball mirror
|
|
||||||
** The rsync destination (if <<code_rsync_code,enabled>>)
|
|
||||||
** The iPXE remote URI (if <<code_ipxe_code,enabled>>)
|
|
||||||
** That <<code_basedir_code>> is correctly set
|
|
||||||
* Makes prerequisite directories
|
|
||||||
|
|
||||||
===== confs
|
|
||||||
A list of configuration files. See <<getconfig_em_conf_file_em_etc_bdisk_build_ini,getConfig()>>.
|
|
@ -1,22 +0,0 @@
|
|||||||
=== `ipxe.py`
|
|
||||||
This file handles building the "mini" ISO via iPXE.
|
|
||||||
|
|
||||||
==== buildIPXE(_conf_)
|
|
||||||
This function builds the iPXE core files.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
==== genISO(_conf_)
|
|
||||||
This function builds the mini ISO (if <<code_iso_code,enabled>>). Returns a dictionary of information about the built ISO file (see <<iso>>).
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
==== tftpbootEnv(_conf_)
|
|
||||||
This function configures a TFTP boot/root directory for traditional PXE setups.
|
|
||||||
|
|
||||||
NOTE: This function currently is a no-op; it will be implemented in future versions.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
@ -1,52 +0,0 @@
|
|||||||
=== `prep.py`
|
|
||||||
This contains functions that download the base tarball releases, preps them for <<code_bchroot_py_code>>, builds necessary directory structures, and performs the overlay preparations.
|
|
||||||
|
|
||||||
==== dirChk(_conf_)
|
|
||||||
This function creates extra directories if needed.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
==== downloadTarball(_conf_)
|
|
||||||
This function downloads the tarball (<<code_mirrorfile_code>>) from the <<code_mirror_code>>, and performs verifications (SHA1 and GPG signature <<code_mirrorgpgsig_code,if enabled>>). Returns the full/absolute path to the downloaded tarball.
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
==== unpackTarball(_tarball_path_, _build_, _keep_ = False)
|
|
||||||
This function extracts the tarball downloaded via <<downloadtarball_em_conf_em,downloadTarball()>>.
|
|
||||||
|
|
||||||
===== tarball_path
|
|
||||||
The full/absolute path to the downloaded tarball.
|
|
||||||
|
|
||||||
===== build
|
|
||||||
See <<build>>.
|
|
||||||
|
|
||||||
===== keep
|
|
||||||
`True` or `False`. Whether we should keep the downloaded tarball after unpacking/extracting. If your upstream tarball changes often enough, it's recommended to set this to `False`. However, setting it to `True` can speed up the build process if you're on a slower Internet connection.
|
|
||||||
|
|
||||||
==== buildChroot(_conf_, _keep_ = False)
|
|
||||||
This incorporates <<downloadtarball_em_conf_em,downloading>> and <<unpacktarball_em_tarball_path_em_em_build_em_em_keep_em_false,extracting>> into one function, as well as applying the <<pre_build_d>> directory (and the <<pre_build_d_2,pre-build.d templates>>).
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
===== keep
|
|
||||||
See <<keep>>.
|
|
||||||
|
|
||||||
==== prepChroot(_conf_)
|
|
||||||
Returns a modified/updated <<build>>. This function:
|
|
||||||
|
|
||||||
. Prepares some variables that <<changing_the_build_process,pre-build.sh>> needs inside the chroot(s)
|
|
||||||
. Builds <<version_info_txt_j2,the VERSION_INFO.txt file>>
|
|
||||||
. Updates the build number
|
|
||||||
. Imports the <<code_mygpgkey_code,signing GPG key>>
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
||||||
|
|
||||||
==== postChroot(_conf_)
|
|
||||||
This function applies the <<overlay_2>> directory (and the <<overlay,overlay templates>>).
|
|
||||||
|
|
||||||
===== conf
|
|
||||||
See <<conf>>.
|
|
@ -1,83 +0,0 @@
|
|||||||
== I don't like BDisk. Are there any other alternatives?
|
|
||||||
First, I'm sorry to hear that BDisk doesn't suit your needs. If you want any features you think are missing or encounter any <<FURTHER.adoc#bug_reports_feature_requests, bugs>>, please report them!
|
|
||||||
|
|
||||||
But yes; there are plenty of alternatives!
|
|
||||||
|
|
||||||
NOTE: Only *currently maintained projects* are listed here.
|
|
||||||
|
|
||||||
=== https://wiki.archlinux.org/index.php/archboot[Archboot^]
|
|
||||||
Written in Bash
|
|
||||||
[frame="topbot",options="header,footer"]
|
|
||||||
|======================
|
|
||||||
|Pros|Cons
|
|
||||||
|Highly featureful|Arch-only live media
|
|
||||||
|Includes an assisted Arch install script|Inaccessible to non-Arch users
|
|
||||||
|Can create tarballs too|Not very customizable by default
|
|
||||||
|Supports hybrid ISOs|Infrequent stable releases
|
|
||||||
|Supports PXE-booting infrastructure|Requires a systemd build host
|
|
||||||
|Supports SecureBoot|Not a secure setup by default
|
|
||||||
|Supports GRUB2's "ISO-loopback" mode|Builds a much larger image
|
|
||||||
|Official Arch project|Some graphical bugs
|
|
||||||
||Much more disk space is necessary for the tool itself
|
|
||||||
||*Only* runs in RAM, so not ideal for RAM-constrained systems
|
|
||||||
||Based on/requires an Arch build host
|
|
||||||
||Requires an x86_64 build host
|
|
||||||
||Has a large amount of dependencies
|
|
||||||
||Manual intervention required for build process
|
|
||||||
||Minimal documentation
|
|
||||||
||
|
|
||||||
|======================
|
|
||||||
|
|
||||||
=== https://wiki.archlinux.org/index.php/archiso[Archiso^]
|
|
||||||
Written in Bash.
|
|
||||||
[frame="topbot",options="header,footer"]
|
|
||||||
|======================
|
|
||||||
|Pros|Cons
|
|
||||||
|Used to build the official Arch ISO|Requires an x86_64 build host
|
|
||||||
|Supports custom local on-disk repositories|Not very featureful as far as customization goes
|
|
||||||
|Supports arbitrary file placement in finished image|Requires an Arch build host
|
|
||||||
|Supports hybrid ISOs|Has odd quirks with package selection
|
|
||||||
|Supports Secureboot|Manual intervention required for build process
|
|
||||||
|Official Arch project|Does not start networking by default
|
|
||||||
|Can run in RAM or from media|Very minimal environment
|
|
||||||
||Arch-only live media
|
|
||||||
||Documentation is lacking
|
|
||||||
||
|
|
||||||
|======================
|
|
||||||
|
|
||||||
=== Debian's https://wiki.debian.org/Simple-CDD[Simple-CDD^]
|
|
||||||
Written in Bash (some Python).
|
|
||||||
[frame="topbot",options="header,footer"]
|
|
||||||
|======================
|
|
||||||
|Pros|Cons
|
|
||||||
|Supports custom packages to be installed|Very limited -- no customization beyond package listing
|
|
||||||
|Lightweight; quick to set up|Takes a long time for preparation; requires a clone of many .deb packages first.
|
|
||||||
||Doesn't seem to work as according to https://wiki.debian.org/Simple-CDD/Howto[the documentation^]
|
|
||||||
||Documentation is sparse
|
|
||||||
||Full featureset unknown due to ISO not building on Debian Jessie (8.0)
|
|
||||||
||
|
|
||||||
|======================
|
|
||||||
|
|
||||||
=== Fedora's https://fedoraproject.org/wiki/Livemedia-creator-_How_to_create_and_use_a_Live_CD[Livemedia-creator^]
|
|
||||||
Written in Bash.
|
|
||||||
[frame="topbot",options="header,footer"]
|
|
||||||
|======================
|
|
||||||
|Pros|Cons
|
|
||||||
|Somewhat customizable|Requires manual initialization of chroot(s) via https://github.com/rpm-software-management/mock/wiki[mock^]
|
|
||||||
|Uses kickstart configurations|*Requires* a kickstart configuration to be useful
|
|
||||||
|Simple/easy to use|Full featureset unknown; documentation is sparse
|
|
||||||
||Limited configuration/customization
|
|
||||||
||
|
|
||||||
|======================
|
|
||||||
|
|
||||||
=== https://github.com/rhinstaller/livecd-tools[LiveCD Tools^]
|
|
||||||
Written in Python 2, some Bash.
|
|
||||||
[frame="topbot",options="header,footer"]
|
|
||||||
|======================
|
|
||||||
|Pros|Cons
|
|
||||||
|Can use kickstarts|*Requires* a kickstart configuration
|
|
||||||
|Simple/easy to use|Limited configuration/customization
|
|
||||||
|Automatically builds chroots|Full featureset unknown; documentation is sparse
|
|
||||||
||
|
|
||||||
|======================
|
|
||||||
|
|
@ -1,3 +0,0 @@
|
|||||||
== How do I get the version/build of an ISO?
|
|
||||||
This can be found in a multitude of places. The full-size ISO file (iso/<distname>-<git tag>-<git rev number>-(32|64|any).iso) should have the version right in the filename. If you want more detailed information (or perhaps you renamed the file), you can mount the ISO as loopback in GNU/Linux, *BSD, or Mac OS X/macOS and check `/path/to/mounted/iso/VERSION_INFO.txt`. Lastly, within the runtime itself (especially handy if booting via iPXE), you can check `/root/VERSION_INFO.txt` to get information about the build of the currently running live environment.
|
|
||||||
|
|
@ -1,5 +0,0 @@
|
|||||||
include::WHYARCH.adoc[]
|
|
||||||
include::LONGTIME.adoc[]
|
|
||||||
include::ISOBIG.adoc[]
|
|
||||||
include::GETVERSION.adoc[]
|
|
||||||
include::ALTERNATIVES.adoc[]
|
|
@ -1,5 +0,0 @@
|
|||||||
== Why is the ISO so large?
|
|
||||||
This actually entirely depends on what <<changing_the_installed_software,packages you have chosen to install>> (and if you're building a <<code_multiarch_code,multiarch ISO>>). The default list is quite large.
|
|
||||||
|
|
||||||
If you build a minimal ISO (i.e. only the necessary components required for booting and nothing else, single-arch), the ISO is only about 570MB (but work is being done to make this even smaller).
|
|
||||||
|
|
@ -1,94 +0,0 @@
|
|||||||
== Why does building take so long?
|
|
||||||
This typically occurs when you're building from within a LiveCD/LiveUSB situation, in a VM/container/etc., or on a headless server. If this is the case, you may run into what appears to be "stalling", especially while keys are generating for the chroots. Thankfully, there is an easy fix. You can install http://www.issihosts.com/haveged/[haveged^] and run it (this can be done safely while a build is executing). This will show an immediate and non-negligible improvement for the above contexts. If you have extra processing power to throw at the build process (or are using a dedicated build box) as well, I recommend enabling <<code_i_am_a_racecar_code,`i_am_a_racecar`>>. BDisk will then be more aggressive with its resource consumption.
|
|
||||||
|
|
||||||
=== Running a local mirror
|
|
||||||
Keep in mind also that the more packages you opt to install, the longer the build process will take. This process will also use quite a fair bit of bandwidth. If you plan on building regular images (e.g. nightly builds, etc.) or are undergoing some custom change testing, I recommend running a private repository mirror on-site. This will not store AUR packages, as those will still be fetched and built (documentation on working around this is TODO) but setting up a local mirror is quite quick and easy.
|
|
||||||
|
|
||||||
First, you'll need at least 70Gb of free disk space. Let's say our repository clone will be at `/srv/repo/arch/`.
|
|
||||||
|
|
||||||
You'll also need to find an Arch mirror, ideally one close to you that is up-to-date. The https://www.archlinux.org/mirrorlist/[mirrorlist generator^] and https://www.archlinux.org/mirrors/[mirror list^] will assist you here greatly.
|
|
||||||
|
|
||||||
NOTE: You'll need to find a mirror that supports _rsync_.
|
|
||||||
|
|
||||||
TIP: You can use ANY distro to run a repository mirror, as long as it has _rsync_ installed!
|
|
||||||
|
|
||||||
==== Set up the sync
|
|
||||||
|
|
||||||
Create a script and mark it as executable with the following content:
|
|
||||||
|
|
||||||
#!/bin/bash
|
|
||||||
SOURCE='rsync://your.mirror.here/archlinux'
|
|
||||||
DEST='/srv/repo/arch'
|
|
||||||
LCK_FLE='/var/run/repo-sync.lck'
|
|
||||||
PATH='/usr/bin'
|
|
||||||
if [ -e "${LCK_FLE}" ] ; then
|
|
||||||
OTHER_PID=$(cat ${LCK_FLE})
|
|
||||||
echo "Another instance already running: ${OTHER_PID}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
# If e.g. /srv/repo is a mountpoint, uncomment below.
|
|
||||||
#findmnt /srv/repo > /dev/null 2>&1
|
|
||||||
#if [[ "${?}" -ne '0' ]];
|
|
||||||
#then
|
|
||||||
# echo "External storage not mounted!"
|
|
||||||
# exit 1
|
|
||||||
#fi
|
|
||||||
echo $$ > "${LCK_FLE}"
|
|
||||||
rsync -rvtlH --delete-after --delay-updates --safe-links --max-delete=1000 ${SOURCE}/. ${DEST}/. >> /var/log/arch.repo.sync 2>&1
|
|
||||||
date +%s > ${DEST}/lastsync
|
|
||||||
rm -f "${LCK_FLE}"
|
|
||||||
|
|
||||||
Assuming you want to run the sync script every 6 hours and it is located at `/root/bin/arch.repo.clone.sh`, this is the cron entry you would use (`crontab -e`):
|
|
||||||
|
|
||||||
0 */6 * * * /root/bin/arch.repo.clone.sh > /dev/null 2>&1
|
|
||||||
|
|
||||||
The first sync can take quite a while, but subsequent runs shouldn't take more than five minutes or so.
|
|
||||||
|
|
||||||
==== Configuring the local mirror
|
|
||||||
You'll need a way to serve this local mirror in a way pacman can understand. Luckily, it's fairly easy. I recommend using https://www.nginx.com/[nginx^] as it's available by default in many operating systems. You can of course use others such as https://www.lighttpd.net/[lighttpd^], https://httpd.apache.org/[apache/httpd^], etc. For the example configuration here, we're going to use an nginx configuration file.
|
|
||||||
|
|
||||||
```
|
|
||||||
server {
|
|
||||||
listen [::]:80;
|
|
||||||
access_log /var/log/nginx/repo.access.log main;
|
|
||||||
error_log /var/log/nginx/repo.error.log;
|
|
||||||
#error_log /var/log/nginx/repo.error.log debug;
|
|
||||||
|
|
||||||
autoindex on;
|
|
||||||
|
|
||||||
root /srv/repo/arch;
|
|
||||||
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
The configuration may vary according to your distribution's provided nginx default configuration, but you'll want this configuration to be served as the default (or set an appropriate `https://nginx.org/en/docs/http/server_names.html[server_name]` directive which you would then use in `<basedir>/extra/pre-build.d/etc/pacman.d/mirrorlist`).
|
|
||||||
|
|
||||||
==== Configuring BDisk
|
|
||||||
|
|
||||||
You'll then want to configure BDisk's chroots to use your local mirror first. However, when doing so you run into an issue -- in the built image, install operations will take longer than they need to because the local mirror likely won't be available! This is a small issue as it's unexpected that you'll need to install software within the live environment, but I've run into cases where it was a necessity once or twice.
|
|
||||||
|
|
||||||
There is an https://devblog.square-r00t.net/articles/libvirt-spoof-domains-dns-records-redirect-to-another-ip[easy workaround^] if you're using libvirt -- you can simply tell your build VM to resolve to the IP address of the box that is running the mirror for the same FQDN that the "preferred" "real" mirror on the Internet is and set that mirror at the top of `<basedir>/extra/pre-build.d/etc/pacman.d/mirrorlist`. However, that's not always feasible- most notably if you're building on a physical box and it's the same host as the repository clone. In that case you can set the specific local resolution -- e.g. `http://127.0.0.1/` -- at the top of `<basedir>/extra/pre-build.d/etc/pacman.d/mirrorlist` and then set a mirrorlist WITHOUT that entry in `<basedir>/overlay/etc/pacman.d/mirrorlist`. For more information on using these type of overrides, see <<advanced_customization>>.
|
|
||||||
|
|
||||||
If you're using the libvirt workaround, remember to configure nginx (or whatever you're using) with a virtual host and location block that matches the "real", upstream mirror. In our example below, we use *http://mirror.us.leaseweb.net/archlinux* as the mirror.
|
|
||||||
|
|
||||||
```
|
|
||||||
server {
|
|
||||||
listen [::]:80;
|
|
||||||
access_log /var/log/nginx/repo.access.log main;
|
|
||||||
error_log /var/log/nginx/repo.error.log;
|
|
||||||
#error_log /var/log/nginx/repo.error.log debug;
|
|
||||||
|
|
||||||
server_name mirror.us.leaseweb.net;
|
|
||||||
|
|
||||||
autoindex on;
|
|
||||||
|
|
||||||
root /srv/repo/arch;
|
|
||||||
|
|
||||||
location /archlinux {
|
|
||||||
autoindex on;
|
|
||||||
rewrite ^/archlinux(/.*)$ /$1;
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
@ -1,5 +0,0 @@
|
|||||||
== Why Arch Linux?
|
|
||||||
Because it's a very easy-to-use, simple, https://wiki.archlinux.org/[well-documented^] distro. It's no-frills and incredibly flexible/customizable, and can be made rather slim (and is out of the box, in fact). It's also very friendly to run as a chroot inside any other distro or as a chroot host to any other distro.
|
|
||||||
|
|
||||||
Plus they release monthly tarball snapshots that are fairly small and create quick bootstrap environments.
|
|
||||||
|
|
@ -1,18 +0,0 @@
|
|||||||
== Bug Reports/Feature Requests
|
|
||||||
NOTE: It is possible to submit a bug or feature request without registering in my bugtracker. One of my pet peeves is needing to create an account/register on a bugtracker simply to report a bug! The following links only require an email address to file a bug (which is necessary in case I need any further clarification from you or to keep you updated on the status of the bug/feature request -- so please be sure to use a valid email address).
|
|
||||||
|
|
||||||
=== Bugs
|
|
||||||
If you encounter any bugs in *BDisk*, you can file a bug report https://bugs.square-r00t.net/index.php?do=newtask&project=2&task_type=1&product_category=5[here^].
|
|
||||||
|
|
||||||
If you encounter any bugs (inaccurate information, typos, misformatting, etc.) in *this documentation*, you can file a bug report https://bugs.square-r00t.net/index.php?do=newtask&project=2&task_type=1&product_category=26[here^].
|
|
||||||
|
|
||||||
=== Feature Requests
|
|
||||||
If you have any features you'd like to see or you think would help *BDisk* become even more useful, please file a feature request https://bugs.square-r00t.net/index.php?do=newtask&project=2&task_type=2&product_category=5[here^].
|
|
||||||
|
|
||||||
If you have any suggestions on how to improve *this documentation* or feel it's missing information that could be useful, please file a feature request https://bugs.square-r00t.net/index.php?do=newtask&project=2&task_type=2&product_category=26[here^].
|
|
||||||
|
|
||||||
=== Patches
|
|
||||||
I gladly welcome https://www.gnu.org/software/diffutils/manual/html_node/Unified-Format.html[patches^], but I deplore using GitHub (even though I https://github.com/johnnybubonic/BDisk[have a mirror there^]). For this reason, please follow the same https://www.kernel.org/doc/Documentation/process/submitting-patches.rst[patch/pull request process] for the Linux kernel and email it to bts@square-r00t.net.
|
|
||||||
|
|
||||||
Alternatively, you may attach a patch to a <<bugs,bug report>>/<<feature_requests,feature request>>.
|
|
||||||
|
|
@ -1,10 +0,0 @@
|
|||||||
== Contact the Author
|
|
||||||
If you have any questions, comments, or concerns, you can use the following information to get in touch with me.
|
|
||||||
|
|
||||||
I am available via mailto:bts@square-r00t.net[email]. If you use GPG, you can find my pubkey and other related info https://devblog.square-r00t.net/about/my-gpg-public-key-verification-of-identity[here^] (and on most keyservers).
|
|
||||||
|
|
||||||
I occasionally write howto articles, brief tips, and other information in my https://devblog.square-r00t.net[dev blog].
|
|
||||||
|
|
||||||
I am on IRC as *r00t^2*, and am usually in the irc://irc.freenode.org/#sysadministrivia[Sysadministrivia channel on Freenode]. Which reminds me, I run a podcast called https://sysadministrivia.com[Sysadministrivia^].
|
|
||||||
|
|
||||||
I am on Twitter as https://twitter.com/brentsaner[@brentsaner^], though I don't tweet very often. (I usually tweet from my https://twitter.com/SysAdm_Podcast[podcast's twitter^].)
|
|
@ -1,95 +0,0 @@
|
|||||||
== Passwords
|
|
||||||
NOTE: If you're specifying passwords, be sure to use a https://www.schneier.com/blog/archives/2014/03/choosing_secure_1.html[strong password^]!
|
|
||||||
|
|
||||||
=== `build.ini` Password Value Examples
|
|
||||||
Passwords work a little interestingly in BDisk. These aspects all apply to both <<code_root_password_code,the root password>> and <<code_password_code,the user password>> (if you enable a regular user).
|
|
||||||
|
|
||||||
CAUTION: DO *NOT* USE A PLAINTEXT PASSWORD IN THE `build.ini`! This is _by design_; plaintext passwords are much more insecure. If you use a plaintext password, it *will not work*.
|
|
||||||
|
|
||||||
WARNING: Remember to <<escaping_the_salted_hash,escape your hash>> before placing it in your `build.ini`!
|
|
||||||
|
|
||||||
.Password Value Scheme
|
|
||||||
[frame="topbot",options="header,footer"]
|
|
||||||
|======================
|
|
||||||
|If you have...|BDisk will...
|
|
||||||
|the string `BLANK`|give the user a blank password, allowing you to just hit `<Enter>` to log in
|
|
||||||
|nothing set|lock the account (e.g. no non-SSH login is possible)
|
|
||||||
|a properly hashed, salted, and escaped string|set the account to the password used to generate that hash.
|
|
||||||
||
|
|
||||||
|======================
|
|
||||||
|
|
||||||
.Password Value Examples
|
|
||||||
[frame="topbot",options="header,footer"]
|
|
||||||
|======================
|
|
||||||
|If the value is...|Then BDisk...
|
|
||||||
|`root_password = BLANK`|will let you log into the TTY as the root user by just hitting the `<Enter>` key.
|
|
||||||
|`root_password =`|will not allow the root user to log into the TTY at all.
|
|
||||||
|`root_password = <some salted, hashed, escaped string created from 'test'>`|will let you log into the root user on a TTY with the password `test`.
|
|
||||||
||
|
|
||||||
|======================
|
|
||||||
|
|
||||||
|
|
||||||
NOTE: I specify "TTY login" because SSH login may still be possible. By default, SSH will allow password logins for non-root users (root user SSH password login is prohibited by default; only pubkey login for root is allowed.) -- this can be overridden, however, by customization.
|
|
||||||
|
|
||||||
=== Generating a Password Salt/Hash
|
|
||||||
First, if you are not familiar with a http://man7.org/linux/man-pages/man3/crypt.3.html#NOTES[salted hash^] that GNU/Linux uses, you may want to learn about it.
|
|
||||||
|
|
||||||
That said, there are utilities in `extra/bin/` that should generate a salted hash for you. Currently only `hashgen.py` is distributed, but additions/examples for other languages are welcome.
|
|
||||||
|
|
||||||
....
|
|
||||||
$ ./hashgen.py
|
|
||||||
|
|
||||||
What password would you like to hash/salt?
|
|
||||||
(NOTE: will NOT echo back!)
|
|
||||||
|
|
||||||
|
|
||||||
Your salted hash is:
|
|
||||||
$6$t92Uvm1ETLocDb1D$BvI0Sa6CSXxzIKBinIaJHb1gLJWheoXp7WzdideAJN46aChFu3hKg07QaIJNk4dfIJ2ry3tEfo3FRvstKWasg/
|
|
||||||
|
|
||||||
....
|
|
||||||
|
|
||||||
The password `test` was used above. In `crypt(3)`-salted hashes, there are specific sections separated by dollar signs (`$`). The first section (containing `6`) marks the *hash algorithm* -- in this case, _SHA512_. (The http://man7.org/linux/man-pages/man3/crypt.3.html#NOTES[crypt man page^] mentions all supported hash types and their corresponding ID.) The next section, `t92Uvm1ETLocDb1D`, is the *salt*. The last section is the *hash*. How salted hashes work: an original piece of data is given (in our case, the word `test`). This data is then sent through a one-way cryptographic process that generates a new string that makes it difficult to know what the original data was. THEN a salt is added- a random string- and the process repeats. In our format, this is done _5000_ times in a row. When you log in with your password, the salt is fetched and the same process is done again- predictably, the data that process goes through should then match the salted hash string stored in the password system (in this case, the https://linux.die.net/man/5/shadow[`/etc/shadow`] file).
|
|
||||||
|
|
||||||
There are other ways to generate the salted hash as well. These include:
|
|
||||||
|
|
||||||
==== Debian's `mkpasswd` Utility
|
|
||||||
Part of the https://packages.debian.org/jessie/whois[whois^] package, available in the AUR as https://aur.archlinux.org/packages/debian-whois-mkpasswd/[debian-whois-mkpasswd^].
|
|
||||||
|
|
||||||
mkpasswd --method=sha-512 <password>
|
|
||||||
|
|
||||||
==== Perl
|
|
||||||
The following Perl one-liner will generate a salted hash string (using the salt `aBcDeFgHiJ`):
|
|
||||||
|
|
||||||
perl -e 'print crypt("PASSWORD","\$6\$aBcDeFgHiJ\$") . "\n"'
|
|
||||||
|
|
||||||
==== `grub-crypt`
|
|
||||||
Legacy GRUB ("GRUB v1") includes `grub-crypt`, which will let you generate a salted hash:
|
|
||||||
|
|
||||||
/sbin/grub-crypt --sha-512
|
|
||||||
|
|
||||||
=== Escaping the Salted Hash
|
|
||||||
One last thing, and this is *very* important -- failure to perform this step will cause all sorts of strange Python errors -- is to escape the salted hash. Thankfully, however, this is a lot easier than it sounds.
|
|
||||||
|
|
||||||
So we have our salted hash: `$6$t92Uvm1ETLocDb1D$BvI0Sa6CSXxzIKBinIaJHb1gLJWheoXp7WzdideAJN46aChFu3hKg07QaIJNk4dfIJ2ry3tEfo3FRvstKWasg/`. In order to get it into a usable format, we need to make sure the configuration parsing won't try to read sections of it as variables. To do this, we do something called *escaping*.
|
|
||||||
|
|
||||||
All you need to do is take the salted hash and replace every `$` you see -- there should be exactly three -- with `$$`. That's it! Count them to be sure; you should now have *6* `$` symbols present instead of three. Once you've escaped the salted hash, you're ready to roll.
|
|
||||||
|
|
||||||
=== Cheating/The Easy Way
|
|
||||||
Feeling overwhelmed? There's an easy way to do all of this.
|
|
||||||
|
|
||||||
First, while logged into your local computer, change your password to what you want either `root_password` or `password` to be:
|
|
||||||
|
|
||||||
passwd
|
|
||||||
|
|
||||||
NOTE: Remember, changing your password won't echo the password back on the screen for security reasons!
|
|
||||||
|
|
||||||
Then get your shadow entry. This has to be done with sudo, as only the root user has access to the hashed passwords on the system. The following command will combine all steps necessary; the string it returns will be a string you can use directly in your `build.ini`.
|
|
||||||
|
|
||||||
sudo grep "^${SUDO_USER}:" /etc/shadow | awk -F':' '{print $2}' | sed -e 's/\$/$$/g'
|
|
||||||
|
|
||||||
Don't forget to change your password back to what it was before!
|
|
||||||
|
|
||||||
passwd
|
|
||||||
|
|
||||||
That's it!
|
|
||||||
|
|
Binary file not shown.
Before Width: | Height: | Size: 254 KiB |
@ -1,128 +0,0 @@
|
|||||||
== How to Netboot BDisk
|
|
||||||
I update this server with images and iPXE images you can use to netboot my personal spin of BDisk.
|
|
||||||
|
|
||||||
You can https://bdisk.square-r00t.net/download/bdisk-mini.iso[download] a demo of the iPXE functionality. Note that your computer needs to be connected to a valid Internet connection via ethernet and be able to get a DHCP lease for it to work.
|
|
||||||
|
|
||||||
NOTE: Advanced users, you can https://www.gnupg.org/gph/en/manual/x135.html[verify^] it against the GPG signature (https://bdisk.square-r00t.net/download/bdisk-mini.iso.asc[ASC], https://bdisk.square-r00t.net/download/bdisk-mini.iso.sig[BIN]). Please see https://devblog.square-r00t.net/about/my-gpg-public-key-verification-of-identity[this blog post^] for information on fetching my keys and such. Note that while this project is in flux, I may be signing with temporarily-generated throwaway keys.
|
|
||||||
|
|
||||||
Once downloaded, you can follow the appropriate steps based on your operating system:
|
|
||||||
|
|
||||||
=== Windows
|
|
||||||
==== CD/DVD
|
|
||||||
Simply put a blank CD/DVD-R (or RW, RW+, etc.) in your optical media drive. Find where you downloaded the above file (it should be named `bdisk-mini.iso`). Right-click and select *Burn disc image*.
|
|
||||||
|
|
||||||
==== USB
|
|
||||||
You'll most likely want to https://svwh.dl.sourceforge.net/project/usbwriter/USBWriter-1.3.zip[download] a program called https://sourceforge.net/projects/usbwriter/[USBWriter^]. Unzip it (or just open it via double-clicking) and copy the `USBWriter.exe` program somewhere you'll remember- your desktop, for instance.
|
|
||||||
|
|
||||||
Next, make sure your USB thumbdrive is inserted in your computer and https://support.microsoft.com/en-us/help/17418/windows-7-create-format-hard-disk-partition[formatted/"initialized"^] already.
|
|
||||||
|
|
||||||
WARNING: Formatting a disk/partition will *destroy* any and all data on that device! Make sure there is nothing on your USB drive you want to keep, as formatting BDisk to it *will* delete any data on it.
|
|
||||||
|
|
||||||
Now right-click on the USBWriter icon and select *Run as administrator*. You may get a warning pop up asking for permissions for USBWriter. It's safe to click Yes.
|
|
||||||
|
|
||||||
Select the proper USB flash drive from the *Target device* dropdown menu. If your USB drive isn't showing up, try clicking the Refresh button and looking again. (If it still doesn't show up, you may need to reboot your computer.)
|
|
||||||
|
|
||||||
Click the *Browse...* button and find where you saved `bdisk-mini.iso`. Once you've found it, double-click it. Then click *Write*. It might take a little bit of time depending on how fast your USB interface is, so give it some time. When it finishes, click *Close*. You now have a bootable USB thumbdrive.
|
|
||||||
|
|
||||||
==== Booting
|
|
||||||
Booting differs depending on each and every hardware, but *typically* you should get a message when you first start up for "_Setup_" and/or "_Boot options_" or the like. The terminology differs here. It will probably be an *F__#__* button (usually `F2`, `F4`, `F10`, or `F12`) or the *Delete* key. While rebooting, try to hold or press repeatedly this key and you should come across an option somewhere with a list of devices to boot from or an order you can set. Make sure the USB (or CD/DVD, whichever media type you're using) is set as first, and save.
|
|
||||||
|
|
||||||
=== Mac OS X/macOS
|
|
||||||
==== CD/DVD
|
|
||||||
Unfortunately, the OS X/macOS Disk Utility doesn't work with hybrid ISOs (what `bdisk-mini.iso` is). At all. You're out of luck, I'm afraid, unless you happen have a spare USB thumbdrive handy.
|
|
||||||
|
|
||||||
==== USB
|
|
||||||
We'll need to get a little messy with this one.
|
|
||||||
|
|
||||||
Open Applications => Utilities => Terminal. A black box should pop up.
|
|
||||||
|
|
||||||
Insert your USB thumbdrive now (if you haven't already) and run the following command:
|
|
||||||
|
|
||||||
diskutil list
|
|
||||||
|
|
||||||
You should see an entry, probably near the bottom, that looks something like this:
|
|
||||||
|
|
||||||
(...)
|
|
||||||
/dev/disk42 (external, physical):
|
|
||||||
#: TYPE NAME SIZE IDENTIFIER
|
|
||||||
0: *8.2 GB disk42
|
|
||||||
(...)
|
|
||||||
|
|
||||||
CAUTION: *Be sure* to find the disk that matches the size of your thumbdrive! If you use the wrong disk identifier, it will break your OS X/macOS install at best and delete all your data at worst!
|
|
||||||
|
|
||||||
Now that you've found which disk your USB device is (the `/dev/disk__#__` part), we can continue. Make sure that it is the disk ID *right above* the line that contains your flash drive size! For our example, I will use `/dev/disk__42__` as an example as it's highly unlikely you'll have that many disk IDs, but be sure to replace this in the following commands with the proper disk ID you found above.
|
|
||||||
|
|
||||||
Then we need to unmount the disk, in case it's already mounted.
|
|
||||||
|
|
||||||
diskutil unmountDisk /dev/disk42
|
|
||||||
|
|
||||||
Assuming you saved BDisk Mini to your Desktop, you can do:
|
|
||||||
|
|
||||||
sudo dd if=~/Desktop/bdisk-mini.iso of=/dev/disk42
|
|
||||||
|
|
||||||
NOTE: The above command may prompt you for a password. This is the same password you use to log into your Mac (and unlock the screensaver, etc.). No characters will show up when you type (for security reasons, in case someone is behind you watching your screen) so it may take you a couple tries.
|
|
||||||
|
|
||||||
This will run for a couple seconds. When it finishes, you should see something similar to (but not necessarily the same numbers as) this:
|
|
||||||
|
|
||||||
0+1 records in
|
|
||||||
0+1 records out
|
|
||||||
169 bytes transferred in 0.000530 secs (318865 bytes/sec)
|
|
||||||
|
|
||||||
At this point you _may_ get a popup warning you _"The disk you inserted was not readable by this computer."_ If you do, just click the *Ignore* button.
|
|
||||||
|
|
||||||
One last step. Still in Terminal:
|
|
||||||
|
|
||||||
diskutil eject /dev/disk42
|
|
||||||
|
|
||||||
You can then close Terminal.
|
|
||||||
|
|
||||||
==== Booting
|
|
||||||
The instructions here don't differ too much from those for Windows, though on Apple hardware it's always the same key. From it being in a shutdown state, power on your Macbook Pro (or whatever it is you have) and hold the *Option* key (or the *Alt* key on non-Apple keyboards). The *Option/Alt* key should bring up a boot menu that will let you select a USB device to boot from.
|
|
||||||
|
|
||||||
Strangely enough, you should still be able to _boot_ a BDisk Mini CD/DVD, you just can't *burn* one. I'm tempted to make a cheap dig at Apple, but I'll refrain.
|
|
||||||
|
|
||||||
=== GNU/Linux
|
|
||||||
==== CD/DVD
|
|
||||||
Easy. Most (if not all) of https://wiki.archlinux.org/index.php/Optical_disc_drive#Burning[these^] should support burning `bdisk-mini.iso` to disc (I'm partial to _cdrecord_). If you prefer a GUI, try some of https://wiki.archlinux.org/index.php/Optical_disc_drive#Burning_CD.2FDVD.2FBD_with_a_GUI[these^] instead (I like _k3b_).
|
|
||||||
|
|
||||||
==== USB
|
|
||||||
Very similar to OS X/macOS in approach. First open a terminal emulator- the ways of navigating to it depends on your window manager/desktop environment, but it's usually under a System or Utilities menu.
|
|
||||||
|
|
||||||
Now we need to find which disk our USB thumbdrive is. Insert your USB thumbdrive now, if you haven't already, and run in the terminal:
|
|
||||||
|
|
||||||
sudo fdisk -l
|
|
||||||
|
|
||||||
You should see a device matching your USB thumbdrive's size. In our example, I use */dev/sdz* as it's unlikely you have that many disks attached to a system, but be sure to replace this in the following commands with the proper disk ID you find.
|
|
||||||
|
|
||||||
(...)
|
|
||||||
Disk /dev/sdz: 7.6 GiB, 8178892800 bytes, 15974400 sectors
|
|
||||||
Units: sectors of 1 * 512 = 512 bytes
|
|
||||||
Sector size (logical/physical): 512 bytes / 512 bytes
|
|
||||||
I/O size (minimum/optimal): 512 bytes / 512 bytes
|
|
||||||
(...)
|
|
||||||
|
|
||||||
CAUTION: *Be sure* to find the disk that matches the size of your thumbdrive! If you use the wrong disk identifier, it will break your GNU/Linux install (or possibly Windows install if you're dual-booting, etc.) at best and delete all your data at worst!
|
|
||||||
|
|
||||||
Make sure it isn't mounted:
|
|
||||||
|
|
||||||
sudo umount /dev/sdz
|
|
||||||
|
|
||||||
You should get a message that says `umount: /dev/sdz: not mounted`. If it was mounted before, it's unmounted now.
|
|
||||||
|
|
||||||
Next, simply dd over the ISO file.
|
|
||||||
|
|
||||||
sudo dd if=~/Desktop/bdisk-mini.iso of=/dev/sdz
|
|
||||||
|
|
||||||
NOTE: The above command may prompt you for a password. This is the same password you use to log in (and unlock the screensaver, etc.). No characters will show up when you type (for security reasons, in case someone is behind you watching your screen) so it may take you a couple tries.
|
|
||||||
|
|
||||||
This will run for a couple seconds. When it finishes, you should see something similar to (but not necessarily the same numbers as) this:
|
|
||||||
|
|
||||||
75776+0 records in
|
|
||||||
75776+0 records out
|
|
||||||
38797312 bytes (39 MB, 37 MiB) copied, 9.01915 s, 4.3 MB/s
|
|
||||||
|
|
||||||
If you get a popup from your desktop environment (assuming you're using one) about not being able to mount a disk, or that it's unformatted, etc. and it prompts you to format, ignore/cancel/close it- do *not* format it! This would erase the BDisk Mini image on it.
|
|
||||||
|
|
||||||
==== Booting
|
|
||||||
Exactly the same as those for Windows. (Unless you're running GNU/Linux on Mac hardware, in which case follow the booting instructions for Mac instead.)
|
|
||||||
|
|
@ -1,14 +0,0 @@
|
|||||||
== Advanced Customization
|
|
||||||
If the <<the_code_build_ini_code_file,`build.ini`>> file doesn't provide enough customization to your liking, I don't blame you! It was designed only to provide the most basic control and is primarily only used to control the build process itself.
|
|
||||||
|
|
||||||
Luckily, there are a lot of changes you can make. For all of these, you'll want to make a copy of the <<code_basedir_code,`basedir`>> directory somewhere and change the basedir configuration option in the <<the_code_build_ini_code_file,`build.ini`>> file to point to that directory.
|
|
||||||
|
|
||||||
This section isn't going to cover every single use case, as that's mostly an exercise for you -- I can't predict how you want to use BDisk! But we'll cover some common cases you can use and in the process you'll know how to implement your own customizations.
|
|
||||||
|
|
||||||
include::advanced/SSH.adoc[]
|
|
||||||
include::advanced/VPN.adoc[]
|
|
||||||
include::advanced/SOFTWARE.adoc[]
|
|
||||||
include::advanced/BUILDING.adoc[]
|
|
||||||
include::advanced/AUTOLOGIN.adoc[]
|
|
||||||
include::advanced/DESKTOP.adoc[]
|
|
||||||
|
|
@ -1,15 +0,0 @@
|
|||||||
== Building a BDisk ISO
|
|
||||||
So you finally have <<the_code_build_ini_code_file,configured>> BDisk (and perhaps added further <<advanced_customization,customizations>>. Now you're ready to build!
|
|
||||||
|
|
||||||
Building is, thankfully, the easiest part!
|
|
||||||
|
|
||||||
NOTE: Due to requiring various mounting and chrooting, BDisk must be run as the *root* user (or via _sudo_).
|
|
||||||
|
|
||||||
To initiate a build, simply run `<basedir>/bdisk/bdisk.py`. That's it! Everything should continue automatically.
|
|
||||||
|
|
||||||
If you'd like to specify a path to a specific build configuration, you can use `<basedir>/bdisk/bdisk.py path/to/build.ini`. The default is _/etc/bdisk/build.ini_ (plus <<the_code_build_ini_code_file,other locations>>).
|
|
||||||
|
|
||||||
If you're using a packaged version you installed from your distro's package manager, you instead should run wherever it installs to. Most likely this is going to be `/usr/sbin/bdisk`. (On systemd build hosts that have done the https://www.freedesktop.org/wiki/Software/systemd/TheCaseForTheUsrMerge/[/usr merge^], you can use `/usr/sbin/bdisk` or `/sbin/bdisk`.)
|
|
||||||
|
|
||||||
If you encounter any issues during the process, make sure you read the documentation -- if your issue still isn't addressed, please be sure to file a <<bug_reports_feature_requests,bug report>>!
|
|
||||||
|
|
@ -1,669 +0,0 @@
|
|||||||
== The `build.ini` File
|
|
||||||
This file is where you can specify some of the very basics of BDisk building. It allows you to specify/define certain variables and settings used by the build process. It uses https://docs.python.org/3/library/configparser.html[ConfigParser^] for the parsing engine, and you can do some https://wiki.python.org/moin/ConfigParserExamples[more advanced^] things with it than I demonstrate in the default.
|
|
||||||
|
|
||||||
It's single-level, but divided into "sections". This is unfortunately a limitation of ConfigParser, but it should be easy enough to follow.
|
|
||||||
|
|
||||||
Blank lines are ignored, as well as any lines beginning with `#` and `;`. There are some restrictions and recommendations for some values, so be sure to note them when they occur. Variables referencing other values in the `build.ini` are allowed in the format of `${keyname}` if it's in the same section; otherwise, `${section:keyname}` can be used.
|
|
||||||
|
|
||||||
If you want to use your own `build.ini` file (and you should!), the following paths are searched in order. The first one found will be used.
|
|
||||||
|
|
||||||
* `/etc/bdisk/build.ini`
|
|
||||||
* `/usr/share/bdisk/build.ini`
|
|
||||||
* `/usr/share/bdisk/extra/build.ini`
|
|
||||||
* `/usr/share/docs/bdisk/build.ini`
|
|
||||||
* `/usr/local/etc/bdisk/build.ini`
|
|
||||||
* `/usr/local/share/docs/bdisk/build.ini`
|
|
||||||
* `/opt/dev/bdisk/build.ini`
|
|
||||||
* `/opt/dev/bdisk/extra/build.ini`
|
|
||||||
* `/opt/dev/bdisk/extra/dist.build.ini`
|
|
||||||
* `<bdisk.py directory>/../build.ini`
|
|
||||||
|
|
||||||
We'll go into more detail for each section below.
|
|
||||||
|
|
||||||
=== Example
|
|
||||||
[bdisk]
|
|
||||||
name = BDISK
|
|
||||||
uxname = bdisk
|
|
||||||
pname = BDisk
|
|
||||||
ver =
|
|
||||||
dev = A Developer
|
|
||||||
email = dev@domain.tld
|
|
||||||
desc = A rescue/restore live environment.
|
|
||||||
uri = https://domain.tld
|
|
||||||
root_password =
|
|
||||||
user = yes
|
|
||||||
[user]
|
|
||||||
username = ${bdisk:uxname}
|
|
||||||
name = Default user
|
|
||||||
groups = ${bdisk:uxname},admin
|
|
||||||
password = $$6$$t92Uvm1ETLocDb1D$$BvI0Sa6CSXxzIKBinIaJHb1gLJWheoXp7WzdideAJN46aChFu3hKg07QaIJNk4dfIJ2ry3tEfo3FRvstKWasg/
|
|
||||||
[source_x86_64]
|
|
||||||
mirror = mirror.us.leaseweb.net
|
|
||||||
mirrorproto = https
|
|
||||||
mirrorpath = /archlinux/iso/latest/
|
|
||||||
mirrorfile = .sig
|
|
||||||
mirrorchksum = ${mirrorpath}sha1sums.txt
|
|
||||||
chksumtype = sha1
|
|
||||||
mirrorgpgsig =
|
|
||||||
gpgkey = 7F2D434B9741E8AC
|
|
||||||
gpgkeyserver =
|
|
||||||
[source_i686]
|
|
||||||
mirror = mirror.us.leaseweb.net
|
|
||||||
mirrorproto = https
|
|
||||||
mirrorpath = /archlinux/iso/latest/
|
|
||||||
mirrorfile =
|
|
||||||
mirrorchksum = ${mirrorpath}sha1sums.txt
|
|
||||||
chksumtype = sha1
|
|
||||||
mirrorgpgsig =
|
|
||||||
gpgkey =
|
|
||||||
gpgkeyserver =
|
|
||||||
[build]
|
|
||||||
dlpath = /var/tmp/${bdisk:uxname}
|
|
||||||
chrootdir = /var/tmp/chroots
|
|
||||||
basedir = /opt/dev/bdisk
|
|
||||||
isodir = ${dlpath}/iso
|
|
||||||
srcdir = ${dlpath}/src
|
|
||||||
prepdir = ${dlpath}/temp
|
|
||||||
archboot = ${prepdir}/${bdisk:name}
|
|
||||||
mountpt = /mnt/${bdisk:uxname}
|
|
||||||
multiarch = x86_64
|
|
||||||
sign = yes
|
|
||||||
ipxe = no
|
|
||||||
i_am_a_racecar = no
|
|
||||||
[gpg]
|
|
||||||
mygpgkey =
|
|
||||||
mygpghome =
|
|
||||||
[sync]
|
|
||||||
http = no
|
|
||||||
tftp = no
|
|
||||||
git = no
|
|
||||||
rsync = no
|
|
||||||
[http]
|
|
||||||
path = ${build:dlpath}/http
|
|
||||||
user = http
|
|
||||||
group = http
|
|
||||||
[tftp]
|
|
||||||
path = ${build:dlpath}/tftpboot
|
|
||||||
user = root
|
|
||||||
group = root
|
|
||||||
[ipxe]
|
|
||||||
iso = no
|
|
||||||
uri = https://domain.tld
|
|
||||||
ssldir = ${build:dlpath}/ssl
|
|
||||||
ssl_ca = ${ssldir}/ca.crt
|
|
||||||
ssl_cakey = ${ssldir}/ca.key
|
|
||||||
ssl_crt = ${ssldir}/main.crt
|
|
||||||
ssl_key = ${ssldir}/main.key
|
|
||||||
[rsync]
|
|
||||||
host =
|
|
||||||
user =
|
|
||||||
path =
|
|
||||||
iso = no
|
|
||||||
|
|
||||||
=== `[bdisk]`
|
|
||||||
This section controls some basic branding and information having to do with the end product.
|
|
||||||
|
|
||||||
==== `name`
|
|
||||||
This value is a "basic" name of your project. It's not really shown anywhere end-user visible, but we need a consistent name that follows some highly constrained rules:
|
|
||||||
|
|
||||||
. Alphanumeric only
|
|
||||||
. 8 characters total (or less)
|
|
||||||
. No whitespace
|
|
||||||
. ASCII only
|
|
||||||
. Will be converted to uppercase if it isn't already
|
|
||||||
|
|
||||||
==== `uxname`
|
|
||||||
This value is used for filenames and the like. I highly recommend it be the same as `<<code_name_code,name>>` (in lowercase) but it doesn't need to be. It also has some rules:
|
|
||||||
|
|
||||||
. Alphanumeric only
|
|
||||||
. No whitespace
|
|
||||||
. ASCII only
|
|
||||||
. Will be converted to lowercase if it isn't already
|
|
||||||
|
|
||||||
==== `pname`
|
|
||||||
This string is used for "pretty-printing" of the project name; it should be a more human-readable string.
|
|
||||||
|
|
||||||
. *Can* contain whitespace
|
|
||||||
. *Can* be mixed-case, uppercase, or lowercase
|
|
||||||
. ASCII only
|
|
||||||
|
|
||||||
==== `ver`
|
|
||||||
The version string. If this isn't specified, we'll try to guess based on the current git commit and tags in `<<code_basedir_code,build:basedir>>`. If `<<code_basedir_code,build:basedir>>` is *not* a git repository (i.e. you installed BDisk from a package manager), you MUST specify a version number.
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
|
|
||||||
==== `dev`
|
|
||||||
The name of the developer or publisher of the ISO, be it an individual or organization. For example, if you are using BDisk to build an install CD for your distro, this would be the name of your distro. The same rules as `<<code_pname_code,pname>>` apply.
|
|
||||||
|
|
||||||
. *Can* contain whitespace
|
|
||||||
. *Can* be mixed-case, uppercase, or lowercase
|
|
||||||
. ASCII only
|
|
||||||
|
|
||||||
==== `email`
|
|
||||||
An email address to use for git syncing messages, and/or GPG key generation.
|
|
||||||
|
|
||||||
==== `desc`
|
|
||||||
What this distribution/project is used for.
|
|
||||||
|
|
||||||
. *Can* contain whitespace
|
|
||||||
. *Can* be mixed-case, uppercase, or lowercase
|
|
||||||
. ASCII only
|
|
||||||
|
|
||||||
==== `uri`
|
|
||||||
What is this project's URI (website, etc.)? Alternatively, your personal site, your company's site, etc.
|
|
||||||
|
|
||||||
. Should be a valid URI understood by curl
|
|
||||||
|
|
||||||
|
|
||||||
==== `root_password`
|
|
||||||
The escaped, salted, hashed string to use for the root user.
|
|
||||||
|
|
||||||
Please see <<passwords,the section on passwords>> for information on this value. In the <<example,example above>>, the string `$$6$$t92Uvm1ETLocDb1D$$BvI0Sa6CSXxzIKBinIaJHb1gLJWheoXp7WzdideAJN46aChFu3hKg07QaIJNk4dfIJ2ry3tEfo3FRvstKWasg/` is created from the password `test`. I cannot stress this enough, do not use a plaintext password here nor just use a regular `/etc/shadow` file/`crypt(3)` hash here. Read the section. I promise it's short.
|
|
||||||
|
|
||||||
==== `user`
|
|
||||||
*Default: no*
|
|
||||||
|
|
||||||
This setting specifies if we should create a regular (non-root) user in the live environment. See the section <<code_user_code_2,`[user]`>> for more options.
|
|
||||||
|
|
||||||
NOTE: If enabled, this user has full sudo access.
|
|
||||||
|
|
||||||
[options="header"]
|
|
||||||
|======================
|
|
||||||
2+^|Accepts (case-insensitive) one of:
|
|
||||||
^m|yes ^m|no
|
|
||||||
^m|true ^m|false
|
|
||||||
^m|1 ^m|0
|
|
||||||
|======================
|
|
||||||
|
|
||||||
=== `[user]`
|
|
||||||
This section of `build.ini` controls aspects about `bdisk:user`. It is only used if <<code_user_code,`bdisk:user`>> is enabled.
|
|
||||||
|
|
||||||
==== `username`
|
|
||||||
What username should the user have? Standard *nix username rules apply:
|
|
||||||
|
|
||||||
. ASCII only
|
|
||||||
. 32 characters or less
|
|
||||||
. Alphanumeric only
|
|
||||||
. Lowercase only
|
|
||||||
. No whitespace
|
|
||||||
. Cannot start with a number
|
|
||||||
|
|
||||||
==== `name`
|
|
||||||
What comment/description/real name should be used for the user? For more information on this, see the https://linux.die.net/man/5/passwd[passwd(5) man page^]'s section on *GECOS*.
|
|
||||||
|
|
||||||
. ASCII only
|
|
||||||
|
|
||||||
==== `groups`
|
|
||||||
What groups this user should be added to, comma-separated. They will be created if they don't exist yet. Standard *nix group names rules apply:
|
|
||||||
|
|
||||||
. ASCII only
|
|
||||||
. 32 characters or less
|
|
||||||
. Can only contain lower-case letters, numeric digits, underscores, or dashes (and can end with a dollar sign)
|
|
||||||
. Must start with a (lower-case) letter or underscore
|
|
||||||
. No whitespace
|
|
||||||
|
|
||||||
==== `password`
|
|
||||||
The escaped, salted, hashed string to use for the non-root user.
|
|
||||||
|
|
||||||
Please see <<passwords,the section on passwords>> for information on this value. In the <<example,example above>>, the string `$$6$$t92Uvm1ETLocDb1D$$BvI0Sa6CSXxzIKBinIaJHb1gLJWheoXp7WzdideAJN46aChFu3hKg07QaIJNk4dfIJ2ry3tEfo3FRvstKWasg/` is created from the password `test`. I cannot stress this enough, do not use a plaintext password here nor just use a regular `/etc/shadow` file/`crypt(3)` hash here. Read the section. I promise it's short.
|
|
||||||
|
|
||||||
=== `[source_<arch>]`
|
|
||||||
This section controls where to fetch the "base" tarballs.
|
|
||||||
|
|
||||||
NOTE: Previously, these settings were *not* architecture-specific, and included in the <<code_build_code,`build`>> section.
|
|
||||||
|
|
||||||
It was necessary to create this section per architecture, because https://www.archlinux.org/news/phasing-out-i686-support/[Arch Linux has dropped i686 support^]. However, plenty of other distros also have removed support and other third-party projects have ported. (You can find the Arch Linux 32-bit/i686 port project http://archlinux32.org/[here^].)
|
|
||||||
|
|
||||||
The directives here are only covered once, however, since both sections are identical- they just allow you to specify different mirrors. Note that the two settings are `[source_i686]` (for 32-bit) and `[source_x86_64]` (for 64-bit/multilib).
|
|
||||||
|
|
||||||
Which section is used (or both) depends on what <<code_multiarch_code, architectures you have enabled>> for the build.
|
|
||||||
|
|
||||||
==== `mirror`
|
|
||||||
A mirror that hosts the bootstrap tarball. It is *highly* recommended you use an Arch Linux https://wiki.archlinux.org/index.php/Install_from_existing_Linux#Method_A:_Using_the_bootstrap_image_.28recommended.29[bootstrap tarball^] as the build process is highly specialized to this (but <<bug_reports_feature_requests,patches/feature requests>> are welcome for other built distros). You can find a list of mirrors at the bottom of Arch's https://www.archlinux.org/download/[download page^].
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Must be accessible remotely/via a WAN-recognized address
|
|
||||||
. Must be a domain/FQDN (or IP address) only; no paths (those come later!)
|
|
||||||
|
|
||||||
==== `mirrorproto`
|
|
||||||
What protocol should we use for the <<code_mirror_code,`mirror`>>?
|
|
||||||
|
|
||||||
|======================
|
|
||||||
^s|Must be (case-insensitive) one of: ^.^m|http ^.^m|https ^.^m|ftp
|
|
||||||
|======================
|
|
||||||
|
|
||||||
==== `mirrorpath`
|
|
||||||
What is the path to the tarball directory on the <<code_mirror_code,`mirror`>>?
|
|
||||||
|
|
||||||
. Must be a complete path (e.g. `/dir1/subdir1/subdir2`)
|
|
||||||
. No whitespace
|
|
||||||
|
|
||||||
==== `mirrorfile`
|
|
||||||
What is the filename for the tarball found in the path specified in <<code_mirrorpath_code,`mirrorpath`>> ? If left blank, we will use the hash <<code_mirrorchksum_code,checksum>> file to try to guess the most recent file.
|
|
||||||
|
|
||||||
==== `mirrorchksum`
|
|
||||||
*[optional]* +
|
|
||||||
*default: (no hash checking done)* +
|
|
||||||
*requires: <<code_chksumtype_code,`chksumtype`>>*
|
|
||||||
|
|
||||||
The path to a checksum file of the bootstrap tarball.
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Must be the full path
|
|
||||||
. Don't include the <<code_mirror_code,mirror domain>> or <<code_mirrorproto_code,protocol>>
|
|
||||||
|
|
||||||
==== `chksumtype`
|
|
||||||
The algorithm that <<code_mirrorchksum_code,`mirrorchksum`>>'s hashes are in.
|
|
||||||
|
|
||||||
[options="header"]
|
|
||||||
|======================
|
|
||||||
7+^|Accepts one of:
|
|
||||||
^m|blake2b
|
|
||||||
^m|blake2s
|
|
||||||
^m|md5
|
|
||||||
^m|sha1
|
|
||||||
^m|sha224
|
|
||||||
^m|sha256
|
|
||||||
^m|sha384
|
|
||||||
^m|sha512
|
|
||||||
^m|sha3_224
|
|
||||||
^m|sha3_256
|
|
||||||
^m|sha3_384
|
|
||||||
^m|sha3_512
|
|
||||||
^m|shake_128
|
|
||||||
^m|shake_256
|
|
||||||
|======================
|
|
||||||
|
|
||||||
TIP: You may have support for additional hashing algorithms, but these are the ones guaranteed to be supported by Python's https://docs.python.org/3/library/hashlib.html[hashlib module^]. To get a full list of algorithms the computer you're building on supports, you can run `python3 -c 'import hashlib;print(hashlib.algorithms_available)'`. Most likely, however, <<code_mirrorchksum_code,`mirrorchksum`>> is going to be hashes of one of the above.
|
|
||||||
|
|
||||||
==== `mirrorgpgsig`
|
|
||||||
*[optional]* +
|
|
||||||
*default: (no GPG checking done)* +
|
|
||||||
*requires: <<optional,_gpg/gnupg_>>* +
|
|
||||||
*requires: <<code_gpgkey_code,`gpgkey`>>*
|
|
||||||
|
|
||||||
If the bootstrap tarball file has a GPG signature, we can use it for extra checking. If it's blank, GPG checking will be disabled.
|
|
||||||
|
|
||||||
If you specify just `.sig` (or use the default and don't specify a <<code_mirrorfile_code,`mirrorfile`>>), BDisk will try to guess based on the file from the hash <<code_mirrorchksum_code,checksum>> file. Note that unless you're using the `.sig` "autodetection", this must evaluate to a full URL. (e.g. `${mirrorproto}://${mirror}${mirrorpath}somefile.sig`)
|
|
||||||
|
|
||||||
==== `gpgkey`
|
|
||||||
*requires: <<optional,_gpg/gnupg_>>*
|
|
||||||
|
|
||||||
What is a key ID that should be used to verify/validate the <<code_mirrorgpgsig_code,`mirrorgpgsig`>>?
|
|
||||||
|
|
||||||
. Only used if <<code_mirrorgpgsig_code,`mirrorgpgsig`>> is set
|
|
||||||
. Can be in "short" form (e.g. _7F2D434B9741E8AC_) or "full" form (_4AA4767BBC9C4B1D18AE28B77F2D434B9741E8AC_), with or without the _0x_ prefix.
|
|
||||||
|
|
||||||
==== `gpgkeyserver`
|
|
||||||
*default: blank (GNUPG-bundled keyservers)* +
|
|
||||||
*requires: <<optional,_gpg/gnupg_>>*
|
|
||||||
|
|
||||||
What is a valid keyserver we should use to fetch <<code_gpgkey_code,`gpgkey`>>?
|
|
||||||
|
|
||||||
. Only used if <<code_mirrorgpgsig_code,`mirrorgpgsig`>> is set
|
|
||||||
. The default (blank) is probably fine. If you don't specify a personal GPG config, then you'll most likely want to leave this blank.
|
|
||||||
. If set, make sure it is a valid keyserver URI (e.g. `hkp://keys.gnupg.net`)
|
|
||||||
|
|
||||||
[options="header"]
|
|
||||||
|======================
|
|
||||||
2+^|Accepts (case-insensitive) one of:
|
|
||||||
^m|yes ^m|no
|
|
||||||
^m|true ^m|false
|
|
||||||
^m|1 ^m|0
|
|
||||||
|======================
|
|
||||||
|
|
||||||
=== `[build]`
|
|
||||||
This section controls some aspects about the host and things like filesystem paths, etc.
|
|
||||||
|
|
||||||
|
|
||||||
==== `gpg`
|
|
||||||
Should we sign our release files? See the <<code_gpg_code_2,`[gpg]`>> section.
|
|
||||||
|
|
||||||
[options="header"]
|
|
||||||
|======================
|
|
||||||
2+^|Accepts (case-insensitive) one of:
|
|
||||||
^m|yes ^m|no
|
|
||||||
^m|true ^m|false
|
|
||||||
^m|1 ^m|0
|
|
||||||
|======================
|
|
||||||
|
|
||||||
==== `dlpath`
|
|
||||||
Where should the release files be saved? Note that many other files are created here as well.
|
|
||||||
|
|
||||||
WARNING: If you manage your project in git, this should not be checked in as it has many large files that are automatically generated!
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Will be created if it doesn't exist
|
|
||||||
|
|
||||||
==== `chrootdir`
|
|
||||||
Where the bootstrap tarball(s) extract to, where the chroots are built and prepped for filesystems on the live media.
|
|
||||||
|
|
||||||
WARNING: If you manage your project in git, this should not be checked in as it has many large files that are automatically generated!
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Will be created if it doesn't exist
|
|
||||||
|
|
||||||
==== `basedir`
|
|
||||||
Where your <<extra,`extra/`>> and <<overlay,`overlay/`>> directories are located. If you checked out from git, this would be your git worktree directory.
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Must exist and contain the above directories populated with necessary files
|
|
||||||
|
|
||||||
==== `isodir`
|
|
||||||
This is the output directory of ISO files when they're created (as well as GPG signatures if you <<code_gpg_code,enabled them>>).
|
|
||||||
|
|
||||||
WARNING: If you manage your project in git, this should not be checked in as it has many large files that are automatically generated!
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Will be created if it doesn't exist
|
|
||||||
|
|
||||||
==== `srcdir`
|
|
||||||
This is where we save and compile source code if we need to dynamically build components (such as iPXE for mini ISOs).
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Will be created if it doesn't exist (and is needed)
|
|
||||||
|
|
||||||
==== `prepdir`
|
|
||||||
This is the directory we use for staging.
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Will be created if it doesn't exist
|
|
||||||
|
|
||||||
==== `archboot`
|
|
||||||
This directory is used to stage boot files.
|
|
||||||
|
|
||||||
WARNING: This directory should not be the exact same path as other directives! If so, you will cause your ISO to be much larger than necessary. A subdirectory of another directive's path, however, is okay.
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Will be created if it doesn't exist
|
|
||||||
|
|
||||||
==== `mountpt`
|
|
||||||
The path to use as a mountpoint.
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Will be created if it doesn't exist
|
|
||||||
|
|
||||||
==== `multiarch`
|
|
||||||
*default: yes*
|
|
||||||
|
|
||||||
Whether or not to build a "multiarch" image- that is, building support for both x86_64 and i686 in the same ISO.
|
|
||||||
|
|
||||||
[options="header"]
|
|
||||||
|======================
|
|
||||||
s|In order to... 3+^|Accepts (case-insensitive) one of:
|
|
||||||
s|build a multiarch ISO ^m|yes ^m|true ^m|1
|
|
||||||
s|build a separate ISO for each architecture ^m|no ^m|false ^m|0
|
|
||||||
s|only build an i686-architecture ISO ^m|i686 ^m|32 ^m|no64
|
|
||||||
s|only build an x86_64-architecture ISO ^m|x86_64 ^m|64 ^m|no32
|
|
||||||
|======================
|
|
||||||
|
|
||||||
==== `ipxe`
|
|
||||||
*default: no*
|
|
||||||
|
|
||||||
Enable iPXE ("mini ISO") functionality.
|
|
||||||
|
|
||||||
NOTE: This has no bearing on the <<code_sync_code,`[sync]`>> section, so you can create an iPXE HTTP preparation for instance without needing to sync it anywhere (in case you're building on the webserver itself).
|
|
||||||
|
|
||||||
[options="header"]
|
|
||||||
|======================
|
|
||||||
2+^|Accepts (case-insensitive) one of:
|
|
||||||
^m|yes ^m|no
|
|
||||||
^m|true ^m|false
|
|
||||||
^m|1 ^m|0
|
|
||||||
|======================
|
|
||||||
|
|
||||||
==== `i_am_a_racecar`
|
|
||||||
*default: no*
|
|
||||||
|
|
||||||
This option should only be enabled if you are on a fairly powerful, multicore system with plenty of RAM. It will speed the build process along, but will have some seriously adverse effects if your system can't handle it. Most modern systems should be fine with enabling it.
|
|
||||||
|
|
||||||
[options="header"]
|
|
||||||
|======================
|
|
||||||
2+^|Accepts (case-insensitive) one of:
|
|
||||||
^m|yes ^m|no
|
|
||||||
^m|true ^m|false
|
|
||||||
^m|1 ^m|0
|
|
||||||
|======================
|
|
||||||
|
|
||||||
=== `[gpg]`
|
|
||||||
This section controls settings for signing our release files. This is only used if <<code_gpg_code,`build:gpg`>> is enabled.
|
|
||||||
|
|
||||||
==== `mygpgkey`
|
|
||||||
A valid key ID that BDisk should use to _sign_ release files.
|
|
||||||
|
|
||||||
. You will be prompted for a passphrase if your key has one/you don't have an open and authorized gpg-agent session. Make sure you have a working pinentry configuration set up!
|
|
||||||
. If you leave this blank we will use the key we generate automatically earlier in the build process.
|
|
||||||
. If this is left blank and signing is enabled, a key will be generated for you automatically.
|
|
||||||
|
|
||||||
==== `mygpghome`
|
|
||||||
The directory that should be used for the above GPG key, if specified. Make sure it contains a keybox (`.kbx`) with your private key. (e.g. `/home/username/.gnupg`)
|
|
||||||
|
|
||||||
=== `[sync]`
|
|
||||||
This section controls what we should do with the resulting build and how to handle uploads, if we choose to use those features.
|
|
||||||
|
|
||||||
==== `http`
|
|
||||||
*default: no*
|
|
||||||
|
|
||||||
If enabled, BDisk will generate/prepare HTTP files. This is mostly only useful if you plan on using iPXE. See the <<code_http_code_2,`[http]`>> section.
|
|
||||||
|
|
||||||
[options="header"]
|
|
||||||
|======================
|
|
||||||
2+^|Accepts (case-insensitive) one of:
|
|
||||||
^m|yes ^m|no
|
|
||||||
^m|true ^m|false
|
|
||||||
^m|1 ^m|0
|
|
||||||
|======================
|
|
||||||
|
|
||||||
==== `tftp`
|
|
||||||
*default: no*
|
|
||||||
|
|
||||||
If enabled, BDisk will generate/prepare TFTP files. This is mostly only useful if you plan on using more traditional (non-iPXE) setups and regular PXE bootstrapping into iPXE.
|
|
||||||
|
|
||||||
[options="header"]
|
|
||||||
|======================
|
|
||||||
2+^|Accepts (case-insensitive) one of:
|
|
||||||
^m|yes ^m|no
|
|
||||||
^m|true ^m|false
|
|
||||||
^m|1 ^m|0
|
|
||||||
|======================
|
|
||||||
|
|
||||||
==== `git`
|
|
||||||
*requires: <<optional,git>>* +
|
|
||||||
*default: no*
|
|
||||||
|
|
||||||
Enable automatic Git pushing for any changes done to the project itself. If you don't have upstream write/push access, you'll want to disable this.
|
|
||||||
|
|
||||||
[options="header"]
|
|
||||||
|======================
|
|
||||||
2+^|Accepts (case-insensitive) one of:
|
|
||||||
^m|yes ^m|no
|
|
||||||
^m|true ^m|false
|
|
||||||
^m|1 ^m|0
|
|
||||||
|======================
|
|
||||||
|
|
||||||
==== `rsync`
|
|
||||||
*requires: <<optional,rsync>>* +
|
|
||||||
*default: no*
|
|
||||||
|
|
||||||
Enable rsync pushing for the ISO (and other files, if you choose- useful for iPXE over HTTP(S)).
|
|
||||||
|
|
||||||
[options="header"]
|
|
||||||
|======================
|
|
||||||
2+^|Accepts (case-insensitive) one of:
|
|
||||||
^m|yes ^m|no
|
|
||||||
^m|true ^m|false
|
|
||||||
^m|1 ^m|0
|
|
||||||
|======================
|
|
||||||
|
|
||||||
=== `[http]`
|
|
||||||
This section controls details about HTTP file preparation/generation. Only used if <<code_http_code,`sync:http`>> is enabled.
|
|
||||||
|
|
||||||
==== `path`
|
|
||||||
This directory is where to build an HTTP webroot.
|
|
||||||
|
|
||||||
WARNING: MAKE SURE you do not store files here that you want to keep! They will be deleted!
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. If blank, HTTP preparation/generation will not be done
|
|
||||||
. If specified, it will be created if it doesn't exist
|
|
||||||
. Will be deleted first
|
|
||||||
|
|
||||||
==== `user`
|
|
||||||
What user the HTTP files should be owned as. This is most likely going to be either 'http', 'nginx', or 'apache'.
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. User must exist on build system
|
|
||||||
|
|
||||||
|======================
|
|
||||||
^s|Can be one of: ^.^m|username ^.^m|http://www.linfo.org/uid.html[UID]
|
|
||||||
|======================
|
|
||||||
|
|
||||||
==== `group`
|
|
||||||
What group the HTTP files should be owned as. This is most likely going to be either 'http', 'nginx', or 'apache'.
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Group must exist on build system
|
|
||||||
|
|
||||||
|======================
|
|
||||||
^s|Can be one of: ^.^m|groupname ^.^m|https://linux.die.net/man/5/group[GID]
|
|
||||||
|======================
|
|
||||||
|
|
||||||
=== `[tftp]`
|
|
||||||
This section controls details about TFTP file preparation/generation. Only used if <<code_tftp_code,`sync:tftp`>> is enabled.
|
|
||||||
|
|
||||||
==== `path`
|
|
||||||
The directory where we want to build a TFTP root.
|
|
||||||
|
|
||||||
WARNING: MAKE SURE you do not store files here that you want to keep! They will be deleted!
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Will be created if it doesn't exist
|
|
||||||
. Will be deleted first
|
|
||||||
|
|
||||||
==== `user`
|
|
||||||
What user the TFTP files should be owned as. This is most likely going to be either 'tftp', 'root', or 'nobody'.
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. User must exist on build system
|
|
||||||
|
|
||||||
|======================
|
|
||||||
^s|Can be one of: ^.^m|username ^.^m|http://www.linfo.org/uid.html[UID]
|
|
||||||
|======================
|
|
||||||
|
|
||||||
==== `group`
|
|
||||||
What group the TFTP files should be owned as. This is most likely going to be either 'tftp', 'root', or 'nobody'.
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Group must exist on build system
|
|
||||||
|
|
||||||
|======================
|
|
||||||
^s|Can be one of: ^.^m|groupname ^.^m|https://linux.die.net/man/5/group[GID]
|
|
||||||
|======================
|
|
||||||
|
|
||||||
=== `[ipxe]`
|
|
||||||
This section controls aspects of iPXE building. Only used if <<code_ipxe_code,`build:ipxe`>> is enabled.
|
|
||||||
|
|
||||||
==== `iso`
|
|
||||||
*default: no* +
|
|
||||||
*requires: <<optional,_git_>>*
|
|
||||||
|
|
||||||
Build a "mini-ISO"; that is, an ISO file that can be used to bootstrap an iPXE environment (so you don't need to set up a traditional PXE environment on your LAN). We'll still build a full standalone ISO no matter what.
|
|
||||||
|
|
||||||
[options="header"]
|
|
||||||
|======================
|
|
||||||
2+^|Accepts (case-insensitive) one of:
|
|
||||||
^m|yes ^m|no
|
|
||||||
^m|true ^m|false
|
|
||||||
^m|1 ^m|0
|
|
||||||
|======================
|
|
||||||
|
|
||||||
==== `uri`
|
|
||||||
What URI iPXE's EMBED script should use. This would be where you host an iPXE chainloading script on a webserver, for instance. See iPXE's example of http://ipxe.org/scripting#dynamic_scripts[dynamic scripts^] for an example of the script that would be placed at this URI.
|
|
||||||
|
|
||||||
NOTE: If you require HTTP BASIC Authentication or HTTP Digest Authentication (untested), you can format it via `https://user:password@bdisk.square-r00t.net/boot.php`.
|
|
||||||
|
|
||||||
NOTE: This currently does not work for HTTPS with self-signed certificates.
|
|
||||||
|
|
||||||
. *Required* if <<code_iso_code,`iso`>> is enabled
|
|
||||||
|
|
||||||
==== `ssldir`
|
|
||||||
Directory to hold SSL results, if we are generating keys, certificates, etc.
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Will be created if it does not exist
|
|
||||||
|
|
||||||
==== `ssl_ca`
|
|
||||||
Path to the (root) CA certificate file iPXE should use. See http://ipxe.org/crypto[iPXE's crypto page^] for more information.
|
|
||||||
|
|
||||||
NOTE: You can use your own CA to sign existing certs. This is handy if you run a third-party/"Trusted" root-CA-signed certificate for the HTTPS target.
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Must be in PEM/X509 format
|
|
||||||
. *Required* if <<code_iso_code,`iso`>> is enabled
|
|
||||||
. If it exists, a matching key (ssl_cakey) *must* be specified
|
|
||||||
.. However, if left blank/doesn't exist, one will be automatically generated
|
|
||||||
|
|
||||||
==== `ssl_cakey`
|
|
||||||
Path to the (root) CA key file iPXE should use.
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Must be in PEM/X509 format
|
|
||||||
. *Required* if <<code_iso_code,`iso`>> is enabled
|
|
||||||
. If left blank or it doesn't exist (and <<code_ssl_ca_code,`ssl_ca`>> is also blank), one will be automatically generated
|
|
||||||
. *Must* match/pair to <<code_ssl_ca_code,`ssl_ca`>> if specified/exists
|
|
||||||
. MUST NOT be passphrase-protected/DES-encrypted
|
|
||||||
|
|
||||||
==== `ssl_crt`
|
|
||||||
Path to the _client_ certificate iPXE should use.
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Must be in PEM/X509 format
|
|
||||||
. *Required* if <<code_iso_code,`iso`>> is enabled
|
|
||||||
. If specified/existent, a matching CA cert (<<code_ssl_ca_code,`ssl_ca`>>) and key (<<code_ssl_cakey_code,`ssl_cakey`>>) *must* be specified
|
|
||||||
.. However, if left blank/doesn't exist, one will be automatically generated
|
|
||||||
. *Must* be signed by <<code_ssl_ca_code,`ssl_ca`>>/<<code_ssl_cakey_code,`ssl_cakey`>> if specified and already exists
|
|
||||||
|
|
||||||
==== `ssl_key`
|
|
||||||
Path to the _client_ key iPXE should use.
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. Must be in PEM/X509 format
|
|
||||||
. *Required* if <<code_iso_code,`iso`>> is enabled
|
|
||||||
. If left blank/nonexistent (and <<code_ssl_ca_code,`ssl_ca`>> is also blank), one will be automatically generated
|
|
||||||
|
|
||||||
=== `[rsync]`
|
|
||||||
This section controls aspects of rsync pushing. Only used if <<code_rsync_code,`sync:rsync`>> is enabled.
|
|
||||||
|
|
||||||
==== `host`
|
|
||||||
The rsync destination host.
|
|
||||||
|
|
||||||
. Must resolve from the build server
|
|
||||||
. Can be host, FQDN, or IP address
|
|
||||||
|
|
||||||
==== `user`
|
|
||||||
This is the remote user we should use when performing the rsync push.
|
|
||||||
|
|
||||||
. User must exist on remote system
|
|
||||||
. SSH pubkey authorization must be configured
|
|
||||||
. The destination's hostkey must be added to your local build user's known hosts
|
|
||||||
|
|
||||||
==== `path`
|
|
||||||
This is the remote destination path we should use for pushing via rsync.
|
|
||||||
|
|
||||||
|
|
||||||
NOTE: You'll probably want to set <<code_user_code_3,`http:user`>> and <<code_group_code,`http:group`>> to what it'll need to be on the destination.
|
|
||||||
|
|
||||||
. No whitespace
|
|
||||||
. The path *must* exist on the remote host
|
|
||||||
. The path MUST be writable by <<code_user_code_5,`user`>>
|
|
||||||
|
|
||||||
==== `iso`
|
|
||||||
Should we rsync over the ISO files too, or just the boot files?
|
|
||||||
|
|
||||||
[options="header"]
|
|
||||||
|======================
|
|
||||||
2+^|Accepts (case-insensitive) one of:
|
|
||||||
^m|yes ^m|no
|
|
||||||
^m|true ^m|false
|
|
||||||
^m|1 ^m|0
|
|
||||||
|======================
|
|
@ -1,80 +0,0 @@
|
|||||||
== Getting Started
|
|
||||||
|
|
||||||
=== Downloading
|
|
||||||
If it isn't in your distro's repositories (It *is* in Arch's AUR! Both https://aur.archlinux.org/packages/bdisk/[tagged release^] and https://aur.archlinux.org/packages/bdisk-git/[git master^].), you can still easily get rolling. Simply visit the project's https://git.square-r00t.net/BDisk/[source code web interface^] and download a tarball under the *Download* column:
|
|
||||||
|
|
||||||
image::fig1.1.png[cgit,align="center"]
|
|
||||||
|
|
||||||
If you know the tag of the commit you want, you can use curl:
|
|
||||||
|
|
||||||
curl -sL -o bdisk.tar.xz https://git.square-r00t.net/BDisk/snapshot/BDisk-3.00-BETA.tar.xz
|
|
||||||
|
|
||||||
or wget:
|
|
||||||
|
|
||||||
wget -O bdisk.tar.xz https://git.square-r00t.net/BDisk/snapshot/BDisk-3.00-BETA.tar.xz
|
|
||||||
|
|
||||||
You can use `https://git.square-r00t.net/BDisk/snapshot/BDisk-master.tar.xz` for the URL if you want the latest working version. If you want a snapshot of a specific commit, you can use e.g. `https://git.square-r00t.net/BDisk/snapshot/BDisk-a1fe1dbc0a0ce2b2a5d1b470d30b60636f9b2efa.tar.xz` and so on.
|
|
||||||
|
|
||||||
Alternatively, you can use https://git-scm.com/[git^]. Git most definitely _should_ be in your distro's repositories.
|
|
||||||
|
|
||||||
TIP: If you're new to git and want to learn more, I highly recommend the book https://git-scm.com/book/en/v2[Pro Git^]. It is available for free download (or online reading).
|
|
||||||
|
|
||||||
You can clone via https:
|
|
||||||
|
|
||||||
git clone https://git.square-r00t.net/BDisk
|
|
||||||
|
|
||||||
or native git protocol:
|
|
||||||
|
|
||||||
git clone git://git.square-r00t.net/bdisk.git BDisk
|
|
||||||
|
|
||||||
The git protocol is much faster, but at a cost of lessened security.
|
|
||||||
|
|
||||||
NOTE: I also have a mirror at https://github.com/johnnybubonic/BDisk[GitHub^], but I don't like GitHub very much and since it's a mirror repository, it's possible it will be out of date. For this reason, it's recommended that you use the resources above.
|
|
||||||
|
|
||||||
=== Prerequisites
|
|
||||||
This is a list of software you'll need available to build with BDisk.
|
|
||||||
|
|
||||||
TIP: Your distro's package manager should have most if not all of these available, so it's unlikely you'll need to install from source.
|
|
||||||
|
|
||||||
NOTE: Some versions may be higher than actually needed (especially _gcc_).
|
|
||||||
|
|
||||||
CAUTION: You will need at least about *15GB* of free disk space, depending on what options you enable. Each architecture chroot (i.e. x86_64, i686) is about 3.5GB after a build using the default package set (more on that later), each architecture release tarball (what we use to build the chroots) is approximately 115MB each, and each squashed image per architecture is 1.1GB (if you use the default package set). If you don't understand what this means quite yet, don't worry- we'll go into more detail later on. Just know that you'll need a fair bit of free disk space.
|
|
||||||
|
|
||||||
==== Necessary
|
|
||||||
These are needed for using BDisk.
|
|
||||||
|
|
||||||
* https://www.python.org/[Python^] (>=3.5)
|
|
||||||
* https://github.com/dosfstools/dosfstools[dosfstools^]
|
|
||||||
* http://libburnia-project.org[libisoburn^]
|
|
||||||
* http://squashfs.sourceforge.net[squashfs-tools^] (>=4.2)
|
|
||||||
|
|
||||||
These are required Python modules:
|
|
||||||
|
|
||||||
* https://pypi.python.org/pypi/GitPython[GitPython^]
|
|
||||||
* https://pypi.python.org/pypi/humanize[Humanize^]
|
|
||||||
* http://jinja.pocoo.org/[Jinja2^]
|
|
||||||
* https://pypi.python.org/pypi/psutil[PSUtil^]
|
|
||||||
* https://pypi.python.org/pypi/patch[Patch^]
|
|
||||||
* https://pypi.python.org/pypi/pygpgme[PyGPGME^]
|
|
||||||
* https://pypi.python.org/pypi/pyOpenSSL[PyOpenSSL^]
|
|
||||||
* https://pypi.python.org/pypi/validators[Validators^]
|
|
||||||
|
|
||||||
==== Optional
|
|
||||||
While not strictly necessary, these will greatly enhance your BDisk usage. I've included some reasons why you might want to install them.
|
|
||||||
|
|
||||||
NOTE: If you do not wish to install any of these or cannot install them, be sure to disable the relevant options in the `build.ini` file (we'll talk about that later). The default `extra/dist.build.ini` should be sane enough to not require any of these.
|
|
||||||
|
|
||||||
* http://cdrtools.sourceforge.net/private/cdrecord.html[cdrtools^]
|
|
||||||
** Needed for building iPXE.
|
|
||||||
* http://gcc.gnu.org[gcc (multilib)^] (>=6.x)
|
|
||||||
** Needed for building iPXE.
|
|
||||||
* http://gcc.gnu.org[gcc-libs (multilib)^] (>=6.x)
|
|
||||||
** (Same as _gcc_.)
|
|
||||||
* https://git-scm.com/[git^]
|
|
||||||
** For autodetection of version, automatically making commits for your project, checking out source code, etc.
|
|
||||||
* https://www.gnupg.org/[gpg/gnupg^] (>=2.1.11)
|
|
||||||
** For automatically signing releases, verifying downloaded files from the Internet as part of the build process, etc. It's okay if you don't have a key set up!
|
|
||||||
* https://rsync.samba.org/[rsync^]
|
|
||||||
** For syncing built ISOs to a fileserver, syncing to a remote iPXE server, syncing to a traditional PXE/TFTP server, etc.
|
|
||||||
|
|
||||||
|
|
@ -1,51 +0,0 @@
|
|||||||
== Important Concepts
|
|
||||||
If this is your first foray into building live distros, there are some terms and concepts we need to understand first. This will simplify the process later on.
|
|
||||||
|
|
||||||
=== Terms
|
|
||||||
An *operating system*, or OS, is what your programs (email client, web browser, etc.) run on.
|
|
||||||
|
|
||||||
There are two basic types of booting systems that communicate between the *hardware* (the physical computer itself and its components) and the operating system: https://en.wikipedia.org/wiki/BIOS[*BIOS*^] (Basic Input/Output System) which has been around for quite some time and the newer https://en.wikipedia.org/wiki/Unified_Extensible_Firmware_Interface[*UEFI*^] (Unified Extensible Firmware Interface). Don't worry, you don't need to memorize what they're acronyms for and there won't be an exam -- just remember that BIOS is an older technology and UEFI is the newer one (and that they operate differently).
|
|
||||||
|
|
||||||
*GNU/Linux*, sometimes just referred to as _Linux_ (And there is a difference between the terminologies, but it's nuanced. You are welcome to https://www.gnu.org/gnu/linux-and-gnu.en.html[read up on it^] though!), is an example of an operating system. Other examples include _Windows_, _macOS_ (previously _OS X_), _iOS_, _Android_, and a whole slew of others. There are many types of GNU/Linux offerings, called _distributions_, _flavors_, or _distros_.
|
|
||||||
|
|
||||||
A *live distro*, *live CD*, *live DVD*, *live USB*, and the like are a way of booting an operating system without installing it on the hard drive- this means the computer doesn't even need a hard drive installed, or it doesn't matter if the installed operating system is broken. Typically they are Linux-based, but there are several Windows-based live releases out there (usually they're focused on rescuing broken Windows systems, so they're not very flexible).
|
|
||||||
|
|
||||||
*Hybrid ISOs* are ISO files that can be burned to optical media (CDs, DVDs, etc.) and also be _dd_'d directly to a USB thumbdrive (for computers that support booting from USB). That means one file, multiple media types.
|
|
||||||
|
|
||||||
*Architectures* are different hardware platforms. This mostly refers to the CPU. Common implementations are *64-bit* (also known as *x86_64* or *AMD64* for ones that support running both 64-bit and 32-bit software, or *IA64* or *Itanium* for processors that only support 64-bit) and *32-bit* (or *i686* and the older *i386* and *i486* implementations). Most consumer PCs on the market today are x86_64.
|
|
||||||
|
|
||||||
*Chroots*, *chrooting*, and the like are variants on the word *chroot*. A *chroot* is a way of running a GNU/Linux install "inside" another GNU/Linux distro. It's sort of like a virtual machine, or VM, except that it's a lot more lightweight and it doesn't do any actual virtualization- and uses the host's kernel, memory mapping, etc. It's very useful for development of operating systems.
|
|
||||||
|
|
||||||
*PXE*, or Pre-boot eXecution Environment, is a way of booting operating systems over a local network.
|
|
||||||
|
|
||||||
*iPXE* is a http://ipxe.org/[project^] that builds a very small Linux kernel, UNDI (traditional PXE) images, and the like that allow you to essentially use PXE over the Internet. It's very flexible and customizable, and supports a custom scripting engine and such.
|
|
||||||
|
|
||||||
=== Why live media is necessary/Why you might want BDisk
|
|
||||||
"But Brent," I hear you ask in a voice which most likely is nothing close to what you actually sound like and entirely in my head, "Why would I need a live CD/USB/etc.? And why BDisk?"
|
|
||||||
|
|
||||||
Elementary, my dear imaginary reader! I touch on some reasons why one might want live media in the beginning of the <<USER.adoc#user_manual,User Manual>>, but here's why you might want BDisk specifically as opposed to another live distro (or <<FAQ.adoc#i_don_t_like_bdisk_are_there_any_other_alternatives,live distro creator>>).
|
|
||||||
|
|
||||||
* Fully customizable
|
|
||||||
* Works with a multitude of GNU/Linux distros -- both for the host build system and as the guest. (Still under development!)
|
|
||||||
* It performs optimizations and compression to help you get the smallest ISO possible.
|
|
||||||
* In addition to building hybrid ISOs, it supports building iPXE hybrid ISOs (meaning you only need a very small file; the rest of the operating system boots over the Internet).
|
|
||||||
* It supports both BIOS and UEFI systems- both the full image and the iPXE images.
|
|
||||||
* It supports multiple architectures (x86_64, i686, possibly IA64 -- untested) on the same ISO.
|
|
||||||
* It supports automatically syncing to a web mirror, PXE boot server, etc. via rsync upon successful build.
|
|
||||||
* It supports SecureBoot (untested!).
|
|
||||||
* It is 100% compatible with both the https://wiki.archlinux.org/index.php/installation_guide[Arch installation guide^] and the https://wiki.gentoo.org/wiki/Handbook:AMD64#Installing_Gentoo[Gentoo installation guide^].
|
|
||||||
* It allows for non-interactive/automated building (i.e. nightly images).
|
|
||||||
* It supports arbitrary file inclusion in a defined path on the ISO itself, not via some arbitrary directory as a separate partition on the media.
|
|
||||||
* It can automatically build an accompanying "mini" ISO using iPXE -- which is also a hybrid, UEFI-supported ISO.
|
|
||||||
* Automatic versioning based on git tags (optional).
|
|
||||||
|
|
||||||
=== Who might want to use BDisk?
|
|
||||||
* System builders/hardware testers
|
|
||||||
* System Administrators/Engineers/Architects
|
|
||||||
* Information Security professionals
|
|
||||||
* Computer repair shops
|
|
||||||
* Technology Consultants
|
|
||||||
* Hobbyists
|
|
||||||
* Home GNU/Linux users
|
|
||||||
* Technology enthusiasts
|
|
||||||
|
|
@ -1,81 +0,0 @@
|
|||||||
== Project Structure
|
|
||||||
The following is a tree of files and directories in a BDisk root directory. Note that yours may not look quite like this, as BDisk supports some directory relocation to aid in packaging for distros. These will be examined in-depth in the coming sections.
|
|
||||||
|
|
||||||
<BDisk root directory>
|
|
||||||
├── bdisk
|
|
||||||
│ ├── bchroot.py
|
|
||||||
│ ├── bdisk.py
|
|
||||||
│ ├── bGPG.py
|
|
||||||
│ ├── bSSL.py
|
|
||||||
│ ├── bsync.py
|
|
||||||
│ ├── build.py
|
|
||||||
│ ├── host.py
|
|
||||||
│ ├── ipxe.py
|
|
||||||
│ └── prep.py
|
|
||||||
├── docs
|
|
||||||
│ ├── COPYING
|
|
||||||
│ ├── LICENSE -> COPYING
|
|
||||||
│ ├── manual
|
|
||||||
│ │ └── (...)
|
|
||||||
│ ├── README
|
|
||||||
├── examples
|
|
||||||
│ └── HTTP
|
|
||||||
│ └── (...)
|
|
||||||
├── extra
|
|
||||||
│ ├── bdisk.png
|
|
||||||
│ ├── bin
|
|
||||||
│ │ └── (...)
|
|
||||||
│ ├── dist.build.ini
|
|
||||||
│ ├── external
|
|
||||||
│ │ └── (...)
|
|
||||||
│ ├── mirrorlist
|
|
||||||
│ ├── pre-build.d
|
|
||||||
│ │ ├── (...)
|
|
||||||
│ │ ├── i686
|
|
||||||
│ │ │ └── (...)
|
|
||||||
│ │ └── x86_64
|
|
||||||
│ │ └── (...)
|
|
||||||
│ └── templates
|
|
||||||
│ ├── BIOS
|
|
||||||
│ │ ├── isolinux.cfg.arch.j2
|
|
||||||
│ │ └── isolinux.cfg.multi.j2
|
|
||||||
│ ├── EFI
|
|
||||||
│ │ ├── base.conf.j2
|
|
||||||
│ │ ├── loader.conf.j2
|
|
||||||
│ │ ├── ram.conf.j2
|
|
||||||
│ │ ├── uefi1.conf.j2
|
|
||||||
│ │ └── uefi2.conf.j2
|
|
||||||
│ ├── GPG.j2
|
|
||||||
│ ├── iPXE
|
|
||||||
│ │ ├── BIOS
|
|
||||||
│ │ │ └── isolinux.cfg.j2
|
|
||||||
│ │ ├── EFI
|
|
||||||
│ │ │ ├── base.conf.j2
|
|
||||||
│ │ │ └── loader.conf.j2
|
|
||||||
│ │ ├── EMBED.j2
|
|
||||||
│ │ ├── patches
|
|
||||||
│ │ │ ├── 01.git-version.patch.j2
|
|
||||||
│ │ │ └── 02.banner.patch.j2
|
|
||||||
│ │ └── ssl
|
|
||||||
│ │ └── openssl.cnf
|
|
||||||
│ ├── overlay
|
|
||||||
│ │ ├── (...)
|
|
||||||
│ │ ├── i686
|
|
||||||
│ │ ├── x86_64
|
|
||||||
│ ├── pre-build.d
|
|
||||||
│ │ ├── (...)
|
|
||||||
│ │ ├── i686
|
|
||||||
│ │ ├── x86_64
|
|
||||||
│ ├── VARS.txt.j2
|
|
||||||
│ └── VERSION_INFO.txt.j2
|
|
||||||
└── overlay
|
|
||||||
├── (...)
|
|
||||||
├── i686
|
|
||||||
└── x86_64
|
|
||||||
|
|
||||||
include::fslayout/BDISK.adoc[]
|
|
||||||
include::fslayout/DOCS.adoc[]
|
|
||||||
include::fslayout/EXAMPLES.adoc[]
|
|
||||||
include::fslayout/EXTRA.adoc[]
|
|
||||||
include::fslayout/OVERLAY.adoc[]
|
|
||||||
|
|
@ -1,10 +0,0 @@
|
|||||||
=== Automatic Login (TTY)
|
|
||||||
If you don't want to have to log into the TTY on boot, BDisk can automatically log in for you with a given username.
|
|
||||||
|
|
||||||
If, for example, you want a terminal to auto-login on TTY1 with the root user, you would create the following file at `<basedir>/overlay/etc/systemd/system/getty@tty1.service.d/autologin.conf`:
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Type=idle
|
|
||||||
ExecStart=
|
|
||||||
ExecStart=-/usr/bin/agetty --autologin root --noclear %I 38400 linux
|
|
||||||
|
|
@ -1,3 +0,0 @@
|
|||||||
=== Changing the Build Process
|
|
||||||
If you want to make modifications that can't be managed by arbitrary file inclusion or changing the software package lists, you may want to introduce additional changes to the image configuration that's run during the chroot. This is fairly easy to do. Simply modify `<basedir>/extra/pre-build.d/root/pre-build.sh` with the changes you desire. Note that this has a `.sh` file extension, but it can be any type of script you want -- Bash, Perl, Python, etc. -- it just needs the shebang line at the beginning of the script.
|
|
||||||
|
|
@ -1,30 +0,0 @@
|
|||||||
=== Starting a Desktop Environment
|
|
||||||
You can install any desktop environment or window manager you would like via <<changing_the_installed_software,package lists>>! From there, it's simply a matter of setting the correct Systemd unit to start automatically. The https://wiki.archlinux.org/index.php/[Arch wiki^] has a lot of useful information here. As an example, I'll include http://lxde.org/[LXDE^] instructions here.
|
|
||||||
|
|
||||||
Simply create a symlink for the target. In the `<basedir>/overlay/etc/systemd/system/` directory:
|
|
||||||
|
|
||||||
ln -s /usr/lib/systemd/system/lxdm.service display-manager.service
|
|
||||||
|
|
||||||
==== Autologin (LXDE)
|
|
||||||
Many desktop environments even offer an automatic login feature directly through the desktop manager (LXDM, in LXDE's case).
|
|
||||||
|
|
||||||
Again, using LXDE as an example, create a file at `<basedir>/overlay/etc/lxdm/lxdm.conf`:
|
|
||||||
|
|
||||||
[base]
|
|
||||||
autologin=bdisk
|
|
||||||
greeter=/usr/lib/lxdm/lxdm-greeter-gtk
|
|
||||||
[server]
|
|
||||||
arg=/usr/bin/X -background vt1
|
|
||||||
[display]
|
|
||||||
gtk_theme=Adwaita
|
|
||||||
bottom_pane=1
|
|
||||||
lang=1
|
|
||||||
keyboard=0
|
|
||||||
theme=Industrial
|
|
||||||
[input]
|
|
||||||
[userlist]
|
|
||||||
disable=0
|
|
||||||
white=
|
|
||||||
black=
|
|
||||||
|
|
||||||
LXDE will then automatically log in with the user `bdisk` (note the second line, right under `[base]`) whenever started.
|
|
@ -1,20 +0,0 @@
|
|||||||
=== Changing the Installed Software
|
|
||||||
BDisk comes with a large https://bdisk.square-r00t.net/packages/[list of software^] installed in the build instance by default, ranging from data recovery (such as _foremost_, _scalpel_, _ddrescue_, etc.), security and data wiping (_nwipe_, _scrub_, etc.), penetration testing (_wifite_, _aircrack-ng_, etc.) and a slew of others. Seriously, if you're looking for a tool, chances are it's on it.
|
|
||||||
|
|
||||||
However, this leads to a fairly long build time -- even with a local repository mirror (many of the packages are from the AUR). You may want to replace the list with a smaller subset.
|
|
||||||
|
|
||||||
The `iso.pkgs.\*` files are not files you should modify- they contain software necessary to the building of BDisk and are the basic necessary files to build a bootable image. However, the `packages.*` files are where you would add or remove software to be installed.
|
|
||||||
|
|
||||||
NOTE: The package lists can contain both https://www.archlinux.org/packages/[Arch repository packages^] *and* https://aur.archlinux.org/[AUR^] packages.
|
|
||||||
|
|
||||||
NOTE: Blank lines are ignored, and you can comment out lines by prefixing the line with the `#` character.
|
|
||||||
|
|
||||||
==== `<basedir>/extra/pre-build.d/i686/root/packages.arch`
|
|
||||||
This list contains packages to *only* be installed for the i686 image.
|
|
||||||
|
|
||||||
==== `<basedir>/extra/pre-build.d/x86_64/root/packages.arch`
|
|
||||||
This list contains packages you *only* want installed in the x86_64 image.
|
|
||||||
|
|
||||||
==== `<basedir>/extra/pre-build.d/root/packages.both`
|
|
||||||
This file contains packages for both architectures (i686 and x86_64).
|
|
||||||
|
|
@ -1,74 +0,0 @@
|
|||||||
=== SSH Pubkey Authentication
|
|
||||||
To start with, you'll want to secure SSH a little more than normal.
|
|
||||||
|
|
||||||
I highly recommend https://stribika.github.io/2015/01/04/secure-secure-shell.html[this article^], which we'll be following in this process.
|
|
||||||
|
|
||||||
First, create a file: `<basedir>/overlay/etc/ssh/sshd_config` using the following. Comments and blank lines have been stripped out for brevity.
|
|
||||||
|
|
||||||
PermitRootLogin prohibit-password
|
|
||||||
HostKey /etc/ssh/ssh_host_ed25519_key
|
|
||||||
HostKey /etc/ssh/ssh_host_rsa_key
|
|
||||||
AuthorizedKeysFile .ssh/authorized_keys
|
|
||||||
PasswordAuthentication no
|
|
||||||
PermitEmptyPasswords no
|
|
||||||
ChallengeResponseAuthentication no
|
|
||||||
UsePAM yes
|
|
||||||
PrintMotd no # pam does that
|
|
||||||
Subsystem sftp /usr/lib/ssh/sftp-server
|
|
||||||
KexAlgorithms curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256
|
|
||||||
Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr
|
|
||||||
MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,umac-128@openssh.com
|
|
||||||
|
|
||||||
We'll also want to implement a more secure `ssh_config` file to avoid possible leaks. The following is `<basedir>/overlay/etc/ssh/ssh_config`:
|
|
||||||
|
|
||||||
Host *
|
|
||||||
KexAlgorithms curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256
|
|
||||||
PasswordAuthentication no
|
|
||||||
ChallengeResponseAuthentication no
|
|
||||||
PubkeyAuthentication yes
|
|
||||||
HostKeyAlgorithms ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ssh-ed25519,ssh-rsa
|
|
||||||
Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr
|
|
||||||
MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,umac-128@openssh.com
|
|
||||||
|
|
||||||
We'll want to create our own moduli. This can take a long time, but only needs to be done once -- it doesn't need to be done for every build. The following commands should be run in `<basedir>/overlay/etc/ssh/`:
|
|
||||||
|
|
||||||
ssh-keygen -G moduli.all -b 4096
|
|
||||||
ssh-keygen -T moduli.safe -f moduli.all
|
|
||||||
mv moduli.safe moduli
|
|
||||||
rm moduli.all
|
|
||||||
|
|
||||||
Then we generate hostkeys. This isn't strictly necessary as the live media will create them automatically when starting SSH if they're missing, but this does provide some verification that the host you're SSHing to is, in fact, running the BDisk instance that you yourself built. The following commands should be run in `<basedir>/overlay/etc/ssh/`:
|
|
||||||
|
|
||||||
ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null
|
|
||||||
ssh-keygen -t rsa -b 4096 -f ssh_host_rsa_key -N "" < /dev/null
|
|
||||||
|
|
||||||
Make sure you have keys on your host workstation generated so you can SSH into BDisk. If you don't have any ED25519 or RSA SSH keys, this will create them for you. The following should be run as the host (build machine, or what have you) user you want to be able to SSH into BDisk as:
|
|
||||||
|
|
||||||
ssh-keygen -t ed25519 -o -a 100
|
|
||||||
ssh-keygen -t rsa -b 4096 -o -a 100
|
|
||||||
|
|
||||||
The defaults are fine. Adding a password to your private key is not necessary, but recommended (though note that doing so will inhibit automated SSHing). You should now have in `~/.ssh/` the following files (assuming you kept the defaults above):
|
|
||||||
|
|
||||||
id_ed25519
|
|
||||||
id_ed25519.pub
|
|
||||||
id_rsa
|
|
||||||
id_rsa.pub
|
|
||||||
|
|
||||||
WARNING: The files ending in *.pub* are _public_ -- they can be published anywhere. However, the ones that are not appended with *.pub* are your _private keys_ and should not be shared with anyone, whether they're password-protected or not!
|
|
||||||
|
|
||||||
Now you'll want to get the public key of your SSH keys so you can add them to your BDisk build. The following commands should be run in `<basedir>/overlay/`:
|
|
||||||
|
|
||||||
mkdir -p root/.ssh
|
|
||||||
chmod 700 root/.ssh
|
|
||||||
touch root/.ssh/authorized_keys
|
|
||||||
chmod 600 root/.ssh/authorized_keys
|
|
||||||
cat ~/.ssh/id_{ed25519,rsa}.pub > root/.ssh/authorized_keys
|
|
||||||
|
|
||||||
If you decided to <<code_user_code,enable a regular non-root user>> in your build, you'll want to perform the same steps above for the regular user as well (or forego the above and just enable SSH for the user you create). Remember to replace `root/` with `home/<<_code_username_code,<username>>>/`!
|
|
||||||
|
|
||||||
Lastly, we need to enable SSH to start on boot. Run the following command in `<basedir>/overlay/etc/systemd/system/multi-user.target.wants/`:
|
|
||||||
|
|
||||||
ln -s /usr/lib/systemd/system/sshd.service sshd.service
|
|
||||||
|
|
||||||
You should now have SSH automatically start once the instance boots.
|
|
||||||
|
|
@ -1,13 +0,0 @@
|
|||||||
=== VPN Configuration
|
|
||||||
For this example we'll set up an https://openvpn.net/[OpenVPN^] client to start automatically on boot.
|
|
||||||
|
|
||||||
Setting up an OpenVPN server is outside the scope of this section, but there are a https://openvpn.net/index.php/open-source/documentation/howto.html[multitude^] of https://openvpn.net/index.php/open-source/documentation/examples.html[useful^] https://wiki.archlinux.org/index.php/OpenVPN[documentation^] https://wiki.gentoo.org/wiki/Openvpn[sources^] out there that will help you with that.
|
|
||||||
|
|
||||||
However, once you have your client .ovpn file (in our example, we'll call it `client.ovpn`) you can add it to the build relatively easily.
|
|
||||||
|
|
||||||
Copy `client.ovpn` as `<basedir>/overlay/etc/openvpn/client/client.conf` -- note the changed file extension. Then, in the `<basedir>/overlay/etc/systemd/system/multi-user.target.wants/` directory, issue these commands:
|
|
||||||
|
|
||||||
ln -s /usr/lib/systemd/system/openvpn-client\@.service openvpn-client\@client.service
|
|
||||||
|
|
||||||
OpenVPN will then start on boot in the built BDisk instance.
|
|
||||||
|
|
@ -1,13 +0,0 @@
|
|||||||
=== bdisk/
|
|
||||||
This directory contains the "heart" of BDisk. It essentially is a Python module package. It contains several python "subpackages" split into different files that provide different functions for BDisk. Chances are you won't ever need to touch anything in here.
|
|
||||||
|
|
||||||
* <<code_bchroot_py_code>>
|
|
||||||
* <<code_bdisk_py_code>>
|
|
||||||
* <<code_bgpg_py_code>>
|
|
||||||
* <<code_bssl_py_code>>
|
|
||||||
* <<code_bsync_py_code>>
|
|
||||||
* <<code_build_py_code>>
|
|
||||||
* <<code_host_py_code>>
|
|
||||||
* <<code_ipxe_py_code>>
|
|
||||||
* <<code_prep_py_code>>
|
|
||||||
|
|
@ -1,15 +0,0 @@
|
|||||||
=== docs/
|
|
||||||
This directory contains various documentation and other helpful text.
|
|
||||||
|
|
||||||
==== COPYING
|
|
||||||
This contains BDisk's license, the GPLv3.
|
|
||||||
|
|
||||||
==== LICENSE
|
|
||||||
This is simply a link to `COPYING`.
|
|
||||||
|
|
||||||
==== manual/
|
|
||||||
This directory contains the documentation source you're reading right now! It's written in http://asciidoc.org/[asciidoc^] (well, to be more precise it's written in/has some http://asciidoctor.org/[asciidoctor^]-isms). I'd recommend reading the rendered version, as the source (while perfectly human-readable) is written in a very modular fashion so it may be inconvenient to read each source file and following include directives.
|
|
||||||
|
|
||||||
==== README
|
|
||||||
This is a placeholder for common convention; it simply tells you to read the manual (and where to find it/build it).
|
|
||||||
|
|
@ -1,3 +0,0 @@
|
|||||||
=== examples/
|
|
||||||
This directory contains example filesystem layouts for e.g. your webserver (for iPXE), or your PXE server via TFTP.
|
|
||||||
|
|
@ -1,21 +0,0 @@
|
|||||||
=== extra/
|
|
||||||
This directory contains multiple "support files" for BDisk building.
|
|
||||||
|
|
||||||
==== bdisk.png
|
|
||||||
This file is used for bootloader graphics. If you change the name of the project, this can be named something different -- see <<code_uxname_code,the section on uxname>>.
|
|
||||||
|
|
||||||
==== bin/
|
|
||||||
This directory contains sample code or extra tools that don't have anything to do with BDisk normal operation but are useful in building a BDisk distribution.
|
|
||||||
|
|
||||||
==== dist.build.ini
|
|
||||||
This is the "source-provided"/upstream example <<the_code_build_ini_code_file,`build.ini`>>. It will be sourced for any missing configuration options or the like.
|
|
||||||
|
|
||||||
==== external/
|
|
||||||
This directory contains external source code for use with extra features in BDisk that would otherwise be inconvenient to fetch and build dynamically.
|
|
||||||
|
|
||||||
==== pkg.build.ini
|
|
||||||
This is the recommended default <<the_code_build_ini_code_file,`build.ini`>> file for packagers of distro repositories to use when packaging BDisk for inclusion in a package manager.
|
|
||||||
|
|
||||||
include::PREBUILD.adoc[]
|
|
||||||
|
|
||||||
include::TEMPLATES.adoc[]
|
|
@ -1,13 +0,0 @@
|
|||||||
=== overlay/
|
|
||||||
This directory follows similar rules to the <<pre_build_d,pre-build.d/>> directory, except it is applied *after* the chroots are prepared (as it is designed to be user-centric rather than core functionality). We'll go more into this later in-depth, as this is where most of your customizations will be done.
|
|
||||||
|
|
||||||
For files that should be included in both chroots, simply recreate the path with the desired file. For instance, if you want a file `/etc/foo/bar.conf` to exist in both i686 and x86_64 versions, it would exist as the path `overlay/etc/foo/bar.conf`.
|
|
||||||
|
|
||||||
It follows the following structure:
|
|
||||||
|
|
||||||
==== i686/
|
|
||||||
This contains modifications that should be applied to the i686 version only. If you wanted a file to exist only in the i686 version at `/etc/a/b.conf`, it would be placed in `overlay/i686/etc/a/b.conf`.
|
|
||||||
|
|
||||||
==== x86_64/
|
|
||||||
This contains modifications that should be applied to the x86_64 version only. If you wanted a file to exist only in the x86_64 version at `/etc/z/y.conf`, it would be placed in `overlay/x86_64/etc/z/y.conf`.
|
|
||||||
|
|
@ -1,13 +0,0 @@
|
|||||||
==== pre-build.d/
|
|
||||||
This file contains a "core" overlay. Generally these files shouldn't be modified unless you know what you're doing, but there are some interesting things you can do in here. Generally speaking, though, you'll want to place your modifications in the <<overlay_2,`overlay/`>> directory.
|
|
||||||
|
|
||||||
For files that should be included in both chroots, simply recreate the path with the desired file. For instance, if you want a file `/etc/foo/bar.conf` to exist in both i686 and x86_64 versions, it would exist as the path `pre-build.d/etc/foo/bar.conf`.
|
|
||||||
|
|
||||||
It follows the following structure:
|
|
||||||
|
|
||||||
===== i686/
|
|
||||||
This contains modifications that should be applied to the i686 version *only*. If you wanted a file to exist only in the i686 version at `/etc/a/b.conf`, it would be placed in `pre-build.d/i686/etc/a/b.conf`.
|
|
||||||
|
|
||||||
===== x86_64/
|
|
||||||
This contains modifications that should be applied to the x86_64 version *only*. If you wanted a file to exist only in the x86_64 version at `/etc/z/y.conf`, it would be placed in `pre-build.d/x86_64/etc/z/y.conf`.
|
|
||||||
|
|
@ -1,48 +0,0 @@
|
|||||||
==== templates/
|
|
||||||
This directory contains dynamic templates used for dynamic configuration building and other such things. They are written in http://jinja.pocoo.org/[Jinja2^]. If you haven't used Jinja2 before, the http://jinja.pocoo.org/docs/dev/templates/[templating documentation^] will prove to be very useful.
|
|
||||||
|
|
||||||
This allows you to customize low-level behaviour of BDisk without modifying the source.
|
|
||||||
|
|
||||||
===== BIOS/
|
|
||||||
The `isolinux.cfg.arch.j2` template controls boot options for the single-arch versions of BDisk. In other words if you only build an i686 or only an x86_64 version, this is the template that would be used for BIOS boot mode.
|
|
||||||
|
|
||||||
The `isolinux.cfg.multi.j2` is used for multi-arch. It manages booting for both i686 and x86_64 versions.
|
|
||||||
|
|
||||||
These files will let you change the behaviour of booting in BIOS systems. The menu colour, the menu entries, the menu default, etc.
|
|
||||||
|
|
||||||
===== EFI/
|
|
||||||
The files in here are https://www.freedesktop.org/wiki/Software/systemd/systemd-boot/[systemd-boot^] configurations. The distributed defaults are:
|
|
||||||
|
|
||||||
`base.conf.j2`, which controls the "main"/default entry.
|
|
||||||
|
|
||||||
`loader.conf.j2`, the meta configuration file which tells the loader which entry to load by default and which entries to include.
|
|
||||||
|
|
||||||
`ram.conf.j2` which allows BDisk to run entirely from RAM.
|
|
||||||
|
|
||||||
`uefi1.conf.j2` which provides a UEFI shell (for older UEFI systems).
|
|
||||||
|
|
||||||
`uefi2.conf.j2` which provides a UEFI shell (for newer UEFI systems).
|
|
||||||
|
|
||||||
===== GPG.j2
|
|
||||||
This file contains default parameters for the https://www.gnupg.org/documentation/manuals/gnupg/Unattended-GPG-key-generation.html[GPG key generation], if we need to automatically generate a key.
|
|
||||||
|
|
||||||
===== iPXE/
|
|
||||||
This directory holds templates for iPXE/mini builds.
|
|
||||||
|
|
||||||
The `BIOS/` directory is similar to <<bios, BIOS/>> mentioned above, but it only needs one configuration file and is a much more minimal design (since its entire purpose is to chainload to the iPXE loader).
|
|
||||||
|
|
||||||
The `EFI/` directory is similar to <<efi, EFI/>> above also, but needs fewer configuration files (its only purpose is to bootstrap iPXE).
|
|
||||||
|
|
||||||
`EMBED.j2` is the iPXE http://ipxe.org/scripting[embedded script^] (http://ipxe.org/embed[more info^]). This is what chainloads the remote resources (kernel, intird, squashed filesystem images, and so forth).
|
|
||||||
|
|
||||||
The `patches/` directory largely control branding of the mini ISO. They are in https://www.gnu.org/software/diffutils/manual/html_node/Unified-Format.html[unified diff^] (or "patch") format.
|
|
||||||
|
|
||||||
===== overlay/
|
|
||||||
This directory contains *templated* overlays. These are intended to be templated by the user. See <<overlay_2, the overlay section>> for more information on how to use this. Remember to suffix your template files with the `.j2` extension.
|
|
||||||
|
|
||||||
===== pre-build.d/
|
|
||||||
This directory contains *templated* overlays. These are intended to not be managed by the user, as they handle configuration necessary for building an ISO. See <<pre_build_d, the pre-build.d section>> for more information on this.
|
|
||||||
|
|
||||||
===== VERSION_INFO.txt.j2
|
|
||||||
This template specifies a VERSION_INFO.txt file placed in various locations throughout the builds to help identify which version, build, etc. the ISO is.
|
|
||||||
|
|
@ -1,9 +0,0 @@
|
|||||||
<?php
// Emit a dynamic iPXE chainload script. The client-side `cpuid --ext 29`
// test checks the CPU's long-mode (64-bit) flag and selects the matching
// kernel/initrd pair; the remaining archiso_* parameters point the booted
// initramfs at the squashed root filesystem over HTTP.
echo <<<'IPXE'
#!ipxe

cpuid --ext 29 && set bit_type 64 || set bit_type 32
initrd example.${bit_type}.img
kernel example.${bit_type}.kern initrd=example.${bit_type}.img ip=:::::eth0:dhcp archiso_http_srv=http://domain.tld/path/to/squashes/ archisobasedir=EXAMPLE archisolabel=EXAMPLE checksum=y
boot

IPXE;
?>
|
|
@ -1 +0,0 @@
|
|||||||
NOT A REAL INITRD IMAGE. REPLACE WITH ACTUAL INITRD.
|
|
@ -1 +0,0 @@
|
|||||||
NOT A REAL KERNEL FILE. REPLACE WITH ACTUAL KERNEL
|
|
@ -1 +0,0 @@
|
|||||||
NOT A REAL INITRD IMAGE. REPLACE WITH ACTUAL INITRD.
|
|
@ -1 +0,0 @@
|
|||||||
NOT A REAL KERNEL FILE. REPLACE WITH ACTUAL KERNEL
|
|
@ -1 +0,0 @@
|
|||||||
c18bde6e20c195bfb0a018b5c13dc420 airootfs.sfs
|
|
@ -1 +0,0 @@
|
|||||||
NOT A REAL SQUASHED FILESYSTEM FILE. REPLACE WITH ACTUAL SQUASHED FILESYSTEM
|
|
@ -1 +0,0 @@
|
|||||||
ada655a13f53702b3fe13cae001ab14f741e10c2bb83869048d4c18e74111c12 airootfs.sfs
|
|
@ -1 +0,0 @@
|
|||||||
c18bde6e20c195bfb0a018b5c13dc420 airootfs.sfs
|
|
@ -1 +0,0 @@
|
|||||||
NOT A REAL SQUASHED FILESYSTEM FILE. REPLACE WITH ACTUAL SQUASHED FILESYSTEM
|
|
@ -1 +0,0 @@
|
|||||||
ada655a13f53702b3fe13cae001ab14f741e10c2bb83869048d4c18e74111c12 airootfs.sfs
|
|
BIN
extra/aif.png
BIN
extra/aif.png
Binary file not shown.
Before Width: | Height: | Size: 152 B |
BIN
extra/bdisk.png
BIN
extra/bdisk.png
Binary file not shown.
Before Width: | Height: | Size: 1.1 MiB |
Binary file not shown.
@ -1,8 +0,0 @@
|
|||||||
#!/usr/bin/env python3
# Interactively prompt for a password (without echoing it) and print its
# salted SHA-512 crypt(3) hash, suitable for e.g. /etc/shadow entries or
# the root_password/password fields in build.ini.
#
# NOTE(review): the original shebang was "#!/bin/env python3"; env(1) lives
# in /usr/bin on virtually all systems, so that shebang fails to exec.
#
# NOTE(review): the stdlib ``crypt`` module is deprecated since Python 3.11
# and removed in 3.13 -- consider passlib (or hashlib-based SHA-512 crypt)
# for new code.

import crypt
import getpass

password = getpass.getpass("\nWhat password would you like to hash/salt?\n(NOTE: will NOT echo back!)\n")
# Generate a fresh random SHA-512 salt, then derive the salted hash from it.
salt = crypt.mksalt(crypt.METHOD_SHA512)
salthash = crypt.crypt(password, salt)
print("\nYour salted hash is:\n\t{0}\n".format(salthash))
|
|
@ -1,103 +0,0 @@
|
|||||||
###########################################################
|
|
||||||
## BUILD.CONF SAMPLE FILE ##
|
|
||||||
###########################################################
|
|
||||||
#
|
|
||||||
# This file is used to define various variables/settings
|
|
||||||
# used by the build script.
|
|
||||||
#
|
|
||||||
# For full (perhaps overly-verbose ;) documentation, please
|
|
||||||
# see:
|
|
||||||
# https://bdisk.square-r00t.net/#_the_code_build_ini_code_file
|
|
||||||
# Or simply refer to the section titled "The build.ini File"
|
|
||||||
# in the user manual.
|
|
||||||
|
|
||||||
[bdisk]
|
|
||||||
name = BDISK
|
|
||||||
uxname = bdisk
|
|
||||||
pname = BDisk
|
|
||||||
ver =
|
|
||||||
dev = r00t^2
|
|
||||||
email = bts@square-r00t.net
|
|
||||||
desc = j00 got 0wnz0r3d lulz.
|
|
||||||
uri = https://bdisk.square-r00t.net
|
|
||||||
root_password =
|
|
||||||
user = yes
|
|
||||||
|
|
||||||
[user]
|
|
||||||
username = ${bdisk:uxname}
|
|
||||||
name = Default user
|
|
||||||
groups = ${bdisk:uxname},admin
|
|
||||||
password =
|
|
||||||
|
|
||||||
[source_x86_64]
|
|
||||||
mirror = mirror.us.leaseweb.net
|
|
||||||
mirrorproto = https
|
|
||||||
mirrorpath = /archlinux/iso/latest/
|
|
||||||
mirrorfile =
|
|
||||||
mirrorchksum = ${mirrorpath}sha1sums.txt
|
|
||||||
chksumtype = sha1
|
|
||||||
mirrorgpgsig =
|
|
||||||
gpgkey = 7F2D434B9741E8AC
|
|
||||||
gpgkeyserver =
|
|
||||||
|
|
||||||
[source_i686]
|
|
||||||
mirror = mirror.us.leaseweb.net
|
|
||||||
mirrorproto = https
|
|
||||||
mirrorpath = /archlinux/iso/latest/
|
|
||||||
mirrorfile =
|
|
||||||
mirrorchksum = ${mirrorpath}sha1sums.txt
|
|
||||||
chksumtype = sha1
|
|
||||||
mirrorgpgsig =
|
|
||||||
gpgkey = 7F2D434B9741E8AC
|
|
||||||
gpgkeyserver =
|
|
||||||
|
|
||||||
[build]
|
|
||||||
gpg = no
|
|
||||||
dlpath = /var/tmp/${bdisk:uxname}
|
|
||||||
chrootdir = /var/tmp/chroots
|
|
||||||
basedir = /opt/dev/bdisk
|
|
||||||
isodir = ${dlpath}/iso
|
|
||||||
srcdir = ${dlpath}/src
|
|
||||||
prepdir = ${dlpath}/temp
|
|
||||||
archboot = ${prepdir}/${bdisk:name}
|
|
||||||
mountpt = /mnt/${bdisk:uxname}
|
|
||||||
#multiarch = yes
|
|
||||||
multiarch = x86_64
|
|
||||||
sign = yes
|
|
||||||
ipxe = no
|
|
||||||
i_am_a_racecar = yes
|
|
||||||
|
|
||||||
[gpg]
|
|
||||||
mygpgkey =
|
|
||||||
mygpghome = ${build:dlpath}/.gnupg
|
|
||||||
|
|
||||||
[sync]
|
|
||||||
http = no
|
|
||||||
tftp = no
|
|
||||||
git = no
|
|
||||||
rsync = no
|
|
||||||
|
|
||||||
[http]
|
|
||||||
path = ${build:dlpath}/http
|
|
||||||
user = http
|
|
||||||
group = http
|
|
||||||
|
|
||||||
[tftp]
|
|
||||||
path = ${build:dlpath}/tftpboot
|
|
||||||
user = root
|
|
||||||
group = root
|
|
||||||
|
|
||||||
[ipxe]
|
|
||||||
iso = no
|
|
||||||
uri = https://bdisk.square-r00t.net
|
|
||||||
ssldir = ${build:dlpath}/ssl
|
|
||||||
ssl_ca = ${ssldir}/ca.crt
|
|
||||||
ssl_cakey = ${ssldir}/ca.key
|
|
||||||
ssl_crt = ${ssldir}/main.crt
|
|
||||||
ssl_key = ${ssldir}/main.key
|
|
||||||
|
|
||||||
[rsync]
|
|
||||||
host =
|
|
||||||
user =
|
|
||||||
path =
|
|
||||||
iso = yes
|
|
3
extra/external/SMC_DumpKey/README
vendored
3
extra/external/SMC_DumpKey/README
vendored
@ -1,3 +0,0 @@
|
|||||||
Taken, with thanks, from http://www.contrib.andrew.cmu.edu/~somlo/OSXKVM/
|
|
||||||
|
|
||||||
Needs to be compiled on linux with gcc, and only runs on genuine Apple hardware (as it polls the SMC chip for the given value)
|
|
193
extra/external/SMC_DumpKey/SmcDumpKey.c
vendored
193
extra/external/SMC_DumpKey/SmcDumpKey.c
vendored
@ -1,193 +0,0 @@
|
|||||||
/*
 * prints out 4-character name of the SMC key at given index position;
 *
 * by Gabriel L. Somlo <somlo@cmu.edu>, Summer 2014
 *
 * Compile with: gcc -O2 -o SmcDumpKey SmcDumpKey.c -Wall
 *
 * You probably want to "modprobe -r applesmc" before running this...
 *
 * Code bits and pieces shamelessly ripped from the linux kernel driver
 * (drivers/hwmon/applesmc.c by N. Boichat and H. Rydberg)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License v2 as published by the
 * Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <limits.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <sys/io.h>
#include <linux/byteorder/little_endian.h>


/* Legacy x86 I/O-port window occupied by the Apple SMC. */
#define APPLESMC_START 0x300
#define APPLESMC_RANGE 0x20

#define APPLESMC_DATA_PORT (APPLESMC_START + 0x00)
#define APPLESMC_CMD_PORT (APPLESMC_START + 0x04)

/* Command bytes written to the SMC command port. */
#define APPLESMC_READ_CMD 0x10
#define APPLESMC_GET_KEY_BY_INDEX_CMD 0x12
#define APPLESMC_GET_KEY_TYPE_CMD 0x13


/* wait up to 128 ms for a status change. */
#define APPLESMC_MIN_WAIT 0x0010
#define APPLESMC_RETRY_WAIT 0x0100
#define APPLESMC_MAX_WAIT 0x20000


#define APPLESMC_KEY_NAME_LEN 4
#define APPLESMC_KEY_TYPE_LEN 4

/*
 * Reply layout of the "get key type" command, read byte-for-byte off the
 * data port -- must stay packed so sizeof() matches the wire size.
 */
typedef struct key_type {
	uint8_t data_len;	/* length (bytes) of the key's data payload */
	uint8_t data_type[APPLESMC_KEY_TYPE_LEN];	/* 4-char type tag */
	uint8_t flags;
} __attribute__((packed)) key_type;


/* wait_read - Wait for a byte to appear on SMC port. */
static int
wait_read(void)
{
	uint8_t status;
	int us;

	/* exponential backoff between polls of the status port */
	for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
		usleep(us);
		status = inb(APPLESMC_CMD_PORT);
		/* read: wait for smc to settle */
		if (status & 0x01)
			return 0;
	}

	fprintf(stderr, "wait_read() fail: 0x%02x\n", status);
	return -1;
}

/*send_byte - Write to SMC port, retrying when necessary. */
static int
send_byte(uint8_t cmd, unsigned short port)
{
	uint8_t status;
	int us;

	outb(cmd, port);
	for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
		usleep(us);
		status = inb(APPLESMC_CMD_PORT);
		/* write: wait for smc to settle */
		if (status & 0x02)
			continue;
		/* ready: cmd accepted, return */
		if (status & 0x04)
			return 0;
		/* timeout: give up */
		if (us << 1 == APPLESMC_MAX_WAIT)
			break;
		/* busy: long wait and resend */
		usleep(APPLESMC_RETRY_WAIT);
		outb(cmd, port);
	}

	fprintf(stderr,
		"send_byte(0x%02x, 0x%04x) fail: 0x%02x\n", cmd, port, status);
	return -1;
}

/* send_argument - Write a 4-character key name, one byte at a time. */
static int
send_argument(const uint8_t *key)
{
	int i;

	for (i = 0; i < APPLESMC_KEY_NAME_LEN; i++)
		if (send_byte(key[i], APPLESMC_DATA_PORT))
			return -1;
	return 0;
}

/*
 * read_smc - Issue `cmd` for `key`, then read a `len`-byte reply into `buf`.
 * Returns 0 on success, -1 on any port-level failure.
 */
static int
read_smc(uint8_t cmd, const uint8_t *key, uint8_t *buf, uint8_t len)
{
	int i;

	if (send_byte(cmd, APPLESMC_CMD_PORT) || send_argument(key)) {
		fprintf(stderr, "%.4s: read arg fail\n", key);
		return -1;
	}

	/* tell the SMC how many reply bytes we expect */
	if (send_byte(len, APPLESMC_DATA_PORT)) {
		fprintf(stderr, "%.4s: read len fail\n", key);
		return -1;
	}

	for (i = 0; i < len; i++) {
		if (wait_read()) {
			fprintf(stderr, "%.4s: read data[%d] fail\n", key, i);
			return -1;
		}
		buf[i] = inb(APPLESMC_DATA_PORT);
	}

	return 0;
}


int
main(int argc, char **argv)
{
	key_type kt;
	uint8_t data_buf[UCHAR_MAX];
	uint8_t i;

	if (argc != 2 || strlen(argv[1]) != APPLESMC_KEY_NAME_LEN) {
		fprintf(stderr, "\nUsage: %s <4-char-key-name>\n\n", argv[0]);
		return -1;
	}

	/* raw access to the SMC port range; needs CAP_SYS_RAWIO (root) */
	if (ioperm(APPLESMC_START, APPLESMC_RANGE, 1) != 0) {
		perror("ioperm failed");
		return -2;
	}

	/* first query the key's type descriptor (gives us the data length) */
	if (read_smc(APPLESMC_GET_KEY_TYPE_CMD,
		     (uint8_t *)argv[1], (uint8_t *)&kt, sizeof(kt)) != 0) {
		fprintf(stderr, "\nread_smc get_key_type error\n\n");
		return -3;
	}
	printf(" type=\"");
	for (i = 0; i < APPLESMC_KEY_TYPE_LEN; i++)
		printf(isprint(kt.data_type[i]) ? "%c" : "\\x%02x",
		       (uint8_t)kt.data_type[i]);
	printf("\" length=%d flags=%x\n", kt.data_len, kt.flags);

	/* then fetch the key's data payload itself */
	if (read_smc(APPLESMC_READ_CMD,
		     (uint8_t *)argv[1], data_buf, kt.data_len) != 0) {
		fprintf(stderr, "\nread_smc get_key_data error\n\n");
		return -4;
	}
	printf(" data=\"");
	for (i = 0; i < kt.data_len; i++)
		printf(isprint(data_buf[i]) ? "%c" : "\\x%02x",
		       (uint8_t)data_buf[i]);
	printf("\"\n");

	return 0;
}
|
|
@ -1,48 +0,0 @@
|
|||||||
# The modules found in here are for distro-specific differences in the builds.
# For instance, if you want to build a Debian-based BDisk, you'd specify pkg['install'] = ['apt-get', '-y', 'install', '%PKG%'],
# name this file as "debian.py", and set bdisk:distro as 'debian'.
# Note that the guest will need python installed. If distro is set as "NOCHECK", a distro check of the tarball won't be performed
# (as the distro check requires python be installed first).

# Special variables to be used in strings:
# %PKG% = the name of a package would be inserted here.

# This template uses Debian as an example.

# The name of the distro. Must match the output from platform.linux_distribution()[0].lower()
# Regex is supported.
distro = 'debian'

# The path to python. Can be either python 2.x (2.6 or higher) or 3.x.
pybin = '/usr/bin/python'

guestenv = {}
# The following environment variables will be set for the guest.
guestenv['DEBIAN_FRONTEND'] = 'noninteractive'

# FIX: this dict was declared as "scripts = {}" while every assignment used
# "script[...]", which raised NameError at import time.
script = {}
# This variable can be used to perform some additional system tweaks and such. This is run before package installation.
# It must be formatted as a complete script- i.e. include a shebang etc.
script['pre'] = """#!/bin/bash
touch /root/BDISK
"""

# This variable can be used to perform some additional system tweaks and such. This is run after package installation.
script['post'] = """#!/bin/bash
rm -f /root/BDISK
"""

pkg = {}
# The command, with arguments, in list format that should be run before we install software in the guest.
# For instance, if your guest distro requires a local package listing cache (nearly all of them do) to be
# updated first, this is where it would be run.
pkg['pre'] = ['apt-get', '-y', 'update']

# The command, with arguments, in a list format to install a package.
# Note that the command must be constructed in a way that does not require user interaction.
pkg['install'] = ['apt-get', '-y', 'install', '%PKG%']

# The command, with arguments, in list format to use to check if a package is installed.
# It should return 0 on exist status if it's installed. Any other exit status assumes the package is not installed.
# FIX: placeholder was '%PKG', missing the trailing '%' used everywhere else.
pkg['check'] = ['dpkg-query', '-f', '${binary:Package}\n', '-W', '%PKG%']
|
|
@ -1,11 +0,0 @@
|
|||||||
# Distro-specific build settings for Arch Linux guests.

# Must match platform.linux_distribution()[0].lower(); regex is supported.
distro = 'arch'

# Path to the python interpreter inside the guest.
pybin = '/usr/bin/python'

# FIX: the dicts were never initialized before item assignment, which
# raised NameError when this module was executed.
script = {}
# Run before package installation; must be a complete script (shebang etc.).
script['pre'] = """#!/bin/bash
touch /root/BDISK
"""
# Run after package installation.
script['post'] = """#!/bin/bash
rm -f /root/BDISK
"""

# Package-manager commands; %PKG% is replaced with the package name.
pkg = {}
pkg['pre'] = ['pacman', '-Syyy']
pkg['install'] = ['apacman', '-S', '%PKG%']
# FIX: placeholder was '%PKG', missing the trailing '%'.
pkg['check'] = ['pacman', '-Q', '%PKG%']
|
|
@ -1,11 +0,0 @@
|
|||||||
# Distro-specific build settings for CentOS guests.

# Must match platform.linux_distribution()[0].lower(); regex is supported.
distro = 'centos linux'

# Path to the python interpreter inside the guest.
pybin = '/usr/bin/python'

# FIX: the dicts were never initialized before item assignment, which
# raised NameError when this module was executed.
script = {}
# Run before package installation; must be a complete script (shebang etc.).
script['pre'] = """#!/bin/bash
touch /root/BDISK
"""
# Run after package installation.
script['post'] = """#!/bin/bash
rm -f /root/BDISK
"""

# Package-manager commands; %PKG% is replaced with the package name.
pkg = {}
pkg['pre'] = ['yum', 'makecache']
pkg['install'] = ['yum', '-y', 'install', '%PKG%']
# FIX: placeholder was '%PKG', missing the trailing '%'.
pkg['check'] = ['rpm', '-qi', '%PKG%']
|
|
@ -1,12 +0,0 @@
|
|||||||
# Distro-specific build settings for Debian guests.

# Must match platform.linux_distribution()[0].lower(); regex is supported.
distro = 'debian'

# Path to the python interpreter inside the guest.
pybin = '/usr/bin/python'

# FIX: the dicts were never initialized before item assignment, which
# raised NameError when this module was executed.
guestenv = {}
# Environment variables set for the guest; keep apt non-interactive.
guestenv['DEBIAN_FRONTEND'] = 'noninteractive'

script = {}
# Run before package installation; must be a complete script (shebang etc.).
script['pre'] = """#!/bin/bash
touch /root/BDISK
"""
# Run after package installation.
script['post'] = """#!/bin/bash
rm -f /root/BDISK
"""

# Package-manager commands; %PKG% is replaced with the package name.
pkg = {}
pkg['pre'] = ['apt-get', '-q', '-y', 'update']
pkg['install'] = ['apt-get', '-q', '-y', '-o Dpkg::Options::="--force-confdef"', '-o Dpkg::Options::="--force-confold"', 'install', '%PKG%']
# FIX: placeholder was '%PKG', missing the trailing '%'.
pkg['check'] = ['dpkg-query', '-f', "'${binary:Package}\n'", '-W', '%PKG%']
|
|
@ -1,11 +0,0 @@
|
|||||||
# Distro-specific build settings for Fedora guests.

# Must match platform.linux_distribution()[0].lower(); regex is supported.
distro = 'fedora'

# Path to the python interpreter inside the guest (Fedora ships python3).
pybin = '/usr/bin/python3'

# FIX: the dicts were never initialized before item assignment, which
# raised NameError when this module was executed.
script = {}
# Run before package installation; must be a complete script (shebang etc.).
script['pre'] = """#!/bin/bash
touch /root/BDISK
"""
# Run after package installation.
script['post'] = """#!/bin/bash
rm -f /root/BDISK
"""

# Package-manager commands; %PKG% is replaced with the package name.
pkg = {}
pkg['pre'] = ['yum', 'makecache']
pkg['install'] = ['yum', '-y', 'install', '%PKG%']
# FIX: placeholder was '%PKG', missing the trailing '%'.
pkg['check'] = ['rpm', '-qi', '%PKG%']
|
|
@ -1,11 +0,0 @@
|
|||||||
# Distro-specific build settings for Red Hat Enterprise Linux guests.

# Must match platform.linux_distribution()[0].lower(); regex is supported
# (matches both the Server and Desktop variants).
distro = 'red hat enterprise linux (server|desktop)'

# Path to the python interpreter inside the guest.
pybin = '/usr/bin/python'

# FIX: the dicts were never initialized before item assignment, which
# raised NameError when this module was executed.
script = {}
# Run before package installation; must be a complete script (shebang etc.).
script['pre'] = """#!/bin/bash
touch /root/BDISK
"""
# Run after package installation.
script['post'] = """#!/bin/bash
rm -f /root/BDISK
"""

# Package-manager commands; %PKG% is replaced with the package name.
pkg = {}
pkg['pre'] = ['yum', 'makecache']
pkg['install'] = ['yum', '-y', 'install', '%PKG%']
# FIX: placeholder was '%PKG', missing the trailing '%'.
pkg['check'] = ['rpm', '-qi', '%PKG%']
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user