chroot leaving mount points still... er... mounted. https://github.com/pkgcore/pychroot/issues/22

This commit is contained in:
brent s. 2016-11-19 20:23:14 -05:00
parent f419a6e4f6
commit 7cdf0eef50
5 changed files with 225 additions and 172 deletions
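In brief: the linked pychroot issue is about bind mounts surviving after the chroot exits, so the new chroot() helper builds its own mount table, clears pychroot's default_mounts, and calls cleanup() explicitly once the command finishes. A minimal sketch of that pattern follows; the chroot path, hostname, and command are placeholders, not values from this commit.

# Sketch of the approach this commit adopts (placeholder paths/command, requires root).
import os
import pychroot

chrootdir = '/var/tmp/bdisk-chroot'   # hypothetical location of an unpacked bootstrap chroot

# Build the mount table by hand (bind the chroot onto itself, then proc/sys/dev)
# rather than relying on pychroot's built-in defaults.
cmnts = {
    chrootdir + ':/': {'recursive': False, 'readonly': False, 'create': False},
    'proc:/proc': {'recursive': True, 'create': True},
    '/sys': {'recursive': True, 'create': True},
    '/dev': {'recursive': True, 'create': True},
}

pychroot.base.Chroot.default_mounts = {}   # keep pychroot from adding its own mount set
chroot = pychroot.base.Chroot(chrootdir, mountpoints = cmnts, hostname = 'bdisk-build')
chroot.mount()
with chroot:
    os.system('/bin/true')                 # placeholder for the real in-chroot command
chroot.cleanup()                           # explicit teardown, so nothing stays mounted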

1
.gitignore vendored
View File

@@ -38,3 +38,4 @@ src/ipxe_local/ssl/txt
# and we DEFINITELY don't need these.
__pycache__/
*.pyc
bdisk/test.py

View File

@@ -1,180 +1,61 @@
#!/usr/bin/env python3
# NOTE: this is almost taken verbatim from https://github.com/pkgcore/pychroot's
# pychroot/scripts/pychroot.py because the pychroot.Chroot method isn't really
# documented very well

#from __future__ import absolute_import, unicode_literals

#from functools import partial
import os
import shutil
import re
import hashlib
import gnupg
import tarfile
import subprocess
import errno   # for the EEXIST checks in the makedirs calls below
import ctypes  # for the libc mount() call below
import git
import jinja2
import datetime
from urllib.request import urlopen
import sys
import psutil
#from pychroot.base import Chroot
import pychroot

def arch_chk(arch):
if arch in ['i686', 'x86_64']:
return(arch)
else:
exit("{0} is not a valid architecture. Must be one of i686 or x86_64.".format(arch))
#class mountpoints(argparse.Action):
#
# def __call__(self, parser, namespace, values, option_string=None):
# if not getattr(namespace, 'mountpoints', False):
# namespace.mountpoints = {}
# namespace.mountpoints.update(values)

def download_tarball(arch, dlpath):
# arch - should be i686 or x86_64
# returns path/filename e.g. /some/path/to/file.tar.gz
# we use .gnupg since we'll need it later.
arch_chk(arch)
try:
os.makedirs(dlpath + '/.gnupg')
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
tarball_path = dlpath + '/.latest.' + arch + '.tar.gz'
#mirror = 'http://mirrors.kernel.org/archlinux'
mirror = 'https://mirror.us.leaseweb.net/archlinux'
rlsdir = mirror + '/iso/latest'
sha_in = urlopen(rlsdir + '/sha1sums.txt')
sha1sums = sha_in.read()
sha_in.close()
sha1_list = sha1sums.decode("utf-8")
sha_list = list(filter(None, sha1_list.split('\n')))
sha_dict = {x.split()[1]: x.split()[0] for x in sha_list}
pattern = re.compile(r'^archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-' + arch + r'\.tar\.gz$')
tarball = [filename.group(0) for l in list(sha_dict.keys()) for filename in [pattern.search(l)] if filename][0]
sha1 = sha_dict[tarball]
# all that lousy work just to get a sha1 sum. okay. so.
if os.path.isfile(tarball_path):
pass
else:
# fetch the tarball...
print("Fetching the tarball for {0} architecture, please wait...".format(arch))
tarball_dl = urlopen(rlsdir + '/' + tarball)
with open(tarball_path, 'wb') as f:
f.write(tarball_dl.read())
tarball_dl.close()
tarball_hash = hashlib.sha1(open(tarball_path, 'rb').read()).hexdigest()
if tarball_hash != sha1:
exit("There was a failure fetching the tarball and the wrong version exists on the filesystem.\nPlease try again later.")
else:
# okay, so the sha1 matches. let's verify the signature.
# we don't want to futz with the user's normal gpg.
gpg = gnupg.GPG(gnupghome = dlpath + '/.gnupg')
input_data = gpg.gen_key_input(name_email = 'tempuser@nodomain.tld', passphrase = 'placeholder_passphrase')
key = gpg.gen_key(input_data)
keyid = '7F2D434B9741E8AC'
gpg.recv_keys('pgp.mit.edu', keyid)
gpg_sig = tarball + '.sig'
sig_dl = urlopen(rlsdir + '/' + gpg_sig)
with open(tarball_path + '.sig', 'wb+') as f:
f.write(sig_dl.read())
sig_dl.close()
sig = tarball_path + '.sig'
tarball_data = open(tarball_path, 'rb')
tarball_data_in = tarball_data.read()
gpg_verify = gpg.verify_data(sig, tarball_data_in)
tarball_data.close()
if not gpg_verify:
exit("There was a failure checking the signature of the release tarball. Please investigate.")
os.remove(sig)
def chroot(chrootdir, chroot_hostname, cmd):
# MOUNT the chroot
mountpoints = psutil.disk_partitions(all = True)
mounts = []
for m in mountpoints:
mounts.append(m.mountpoint)
cmnts = {}
# mount the chrootdir... onto itself. as a bind mount. it's so stupid, i know. see https://bugs.archlinux.org/task/46169
if chrootdir not in mounts:
#cmnts[chrootdir + ':' + chrootdir] = {'recursive': False, 'readonly': False, 'create': False}
cmnts[chrootdir + ':/'] = {'recursive': False, 'readonly': False, 'create': False}

return(tarball_path)
# mount -t proc to chrootdir + '/proc' here
if (chrootdir + '/proc') not in mounts:
cmnts['proc:/proc'] = {'recursive': True, 'create': True}

def unpack_tarball(tarball_path, chrootdir):
# Make the dir if it doesn't exist
try:
os.makedirs(chrootdir)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
# Open and extract the tarball
tar = tarfile.open(tarball_path, 'r:gz')
tar.extractall(path = chrootdir)
tar.close()
return(True)
# rbind mount /sys to chrootdir + '/sys' here
if (chrootdir + '/sys') not in mounts:
#cmnts['/sys:/sys'] = {'recursive': True, 'create': True} # if the below doesn't work, try me. can also try ['sysfs:/sys']
cmnts['/sys'] = {'recursive': True, 'create': True}

def build_chroot(arch, chrootdir, dlpath, extradir):
unpack_me = unpack_tarball(download_tarball(arch_chk(arch), dlpath), chrootdir)
if unpack_me:
pass
else:
exit("Something went wrong when trying to unpack the tarball.")
# rbind mount /dev to chrootdir + '/dev' here
if (chrootdir + '/dev') not in mounts:
cmnts['/dev'] = {'recursive': True, 'create': True}

print("The download and extraction has completed. Now prepping the chroot environment with some additional changes.")
# build dict of lists of files and dirs from pre-build.d dir, do the same with arch-specific changes.
prebuild_overlay = {}
prebuild_arch_overlay = {}
for x in ['i686', 'x86_64']:
prebuild_arch_overlay[x] = {}
for y in ['files', 'dirs']:
prebuild_overlay[y] = []
prebuild_arch_overlay[x][y] = []
for path, dirs, files in os.walk(extradir + '/pre-build.d/'):
prebuild_overlay['dirs'].append(path + '/')
for file in files:
prebuild_overlay['files'].append(os.path.join(path, file))
for x in prebuild_overlay.keys():
prebuild_overlay[x][:] = [re.sub('^' + extradir + '/pre-build.d/', '', s) for s in prebuild_overlay[x]]
prebuild_overlay[x] = list(filter(None, prebuild_overlay[x]))
for y in prebuild_arch_overlay.keys():
prebuild_arch_overlay[y][x][:] = [i for i in prebuild_overlay[x] if i.startswith(y)]
prebuild_arch_overlay[y][x][:] = [re.sub('^' + y + '/', '', s) for s in prebuild_arch_overlay[y][x]]
prebuild_arch_overlay[y][x] = list(filter(None, prebuild_arch_overlay[y][x]))
prebuild_overlay[x][:] = [y for y in prebuild_overlay[x] if not y.startswith(('x86_64','i686'))]
prebuild_overlay['dirs'].remove('/')
# create the dir structure. these should almost definitely be owned by root.
for dir in prebuild_overlay['dirs']:
os.makedirs(chrootdir + '/' + dir, exist_ok = True)
os.chown(chrootdir + '/' + dir, 0, 0)
# and copy over the files. again, chown to root.
for file in prebuild_overlay['files']:
shutil.copy2(extradir + '/pre-build.d/' + file, chrootdir + '/' + file)
os.chown(chrootdir + '/' + file, 0, 0)
# do the same for arch-specific stuff.
for dir in prebuild_arch_overlay[arch]['dirs']:
os.makedirs(chrootdir + '/' + dir, exist_ok = True)
os.chown(chrootdir + '/' + dir, 0, 0)
for file in prebuild_arch_overlay[arch]['files']:
shutil.copy2(extradir + '/pre-build.d/' + arch + '/' + file, chrootdir + '/' + file)
os.chown(chrootdir + '/' + file, 0, 0)
return(chrootdir)
# mount the efivars in the chroot if it exists on the host. i mean, why not?
if '/sys/firmware/efi/efivars' in mounts:
if (chrootdir + '/sys/firmware/efi/efivars') not in mounts:
cmnts['/sys/firmware/efi/efivars'] = {'recursive': True}

def prep_chroot(templates_dir, chrootdir, bdisk, arch):
build = {}
# let's prep some variables to write out the version info.txt
# get the git tag and short commit hash
repo = git.Repo(bdisk['dir'])
refs = repo.git.describe(repo.head.commit).split('-')
build['ver'] = refs[0] + '-' + refs[2]
# and these should be passed in from the args, for the most part.
build['name'] = bdisk['name']
build['time'] = datetime.datetime.utcnow().strftime("%a %b %d %H:%M:%S UTC %Y")
build['host'] = bdisk['hostname']
build['user'] = os.environ['USER']
if os.environ.get('SUDO_USER'):
build['realuser'] = os.environ['SUDO_USER']
# and now that we have that dict, let's write out the VERSION_INFO.txt file.
env = jinja2.Environment(loader = jinja2.FileSystemLoader(templates_dir))
tpl = env.get_template('VERSION_INFO.txt.j2')
tpl_out = tpl.render(build = build)
with open(chrootdir + '/root/VERSION_INFO.txt', 'w+') as f:
f.write(tpl_out)
return(build)
if '/run' in mounts:
if (chrootdir + '/run') not in mounts:
cmnts['/run'] = {'recursive': True}

def mount(srcdir, destdir, fstype, options = ''):
if os.geteuid() != 0:
exit("HEY, you need to run this with root privileges because we do mounting and stuff for chroots.")
# thank you https://stackoverflow.com/a/29156997
# it'd be great to use systemd-nspawn for this, but:
# 1.) not all systems even HAVE systemd, and
# 2.) there's no python interface; i'm trying to keep the shell calls to a minimum.
os.makedirs(destdir, exist_ok = True)
# libc's mount(2) wants C strings, so encode the str arguments to bytes
ret = ctypes.CDLL('libc.so.6', use_errno = True).mount(srcdir.encode(), destdir.encode(), fstype.encode(), 0, options.encode())
if ret < 0:
errno = ctypes.get_errno()
raise RuntimeError("Error mounting {0} ({1}) on {2} with options '{3}': {4}".format(srcdir, fstype, destdir, options, os.strerror(errno)))
else:
return(True)

def mount_chroot(chrootdir, destdir):
# here's where we actually set up the mountpoints for a chroot.
mount('', chrootdir + '/proc', 'proc', 'nosuid,noexec,nodev')
pychroot.base.Chroot.default_mounts = {}
chroot = pychroot.base.Chroot(chrootdir, mountpoints = cmnts, hostname = chroot_hostname)
chroot.mount()
with chroot:
import os
os.system(cmd)
chroot.cleanup()
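For reference, a hedged example of how the new chroot() helper would be invoked from the build flow; the path, hostname, and script below are illustrative placeholders, not values from this commit.

# Hypothetical call; must run as root, since the mount table above needs CAP_SYS_ADMIN.
chroot('/var/tmp/chroot_x86_64', 'bdisk-build', '/root/pre-build.sh')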

View File

@@ -1,5 +1,3 @@
#!/usr/bin/env python3

import os
import sys
import platform

159
bdisk/prep.py Executable file
View File

@@ -0,0 +1,159 @@
import os
import shutil
import re
import hashlib
import gnupg
import tarfile
import subprocess
import errno  # for the EEXIST checks in the makedirs calls below
import git
import jinja2
import datetime
from urllib.request import urlopen

def archChk(arch):
if arch in ['i686', 'x86_64']:
return(arch)
else:
exit("{0} is not a valid architecture. Must be one of i686 or x86_64.".format(arch))

def downloadTarball(arch, dlpath):
# arch - should be i686 or x86_64
# returns path/filename e.g. /some/path/to/file.tar.gz
# we use .gnupg since we'll need it later.
archChk(arch)
try:
os.makedirs(dlpath + '/.gnupg')
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
tarball_path = dlpath + '/.latest.' + arch + '.tar.gz'
#mirror = 'http://mirrors.kernel.org/archlinux'
mirror = 'https://mirror.us.leaseweb.net/archlinux'
rlsdir = mirror + '/iso/latest'
sha_in = urlopen(rlsdir + '/sha1sums.txt')
sha1sums = sha_in.read()
sha_in.close()
sha1_list = sha1sums.decode("utf-8")
sha_list = list(filter(None, sha1_list.split('\n')))
sha_dict = {x.split()[1]: x.split()[0] for x in sha_list}
pattern = re.compile(r'^archlinux-bootstrap-[0-9]{4}\.[0-9]{2}\.[0-9]{2}-' + arch + r'\.tar\.gz$')
tarball = [filename.group(0) for l in list(sha_dict.keys()) for filename in [pattern.search(l)] if filename][0]
sha1 = sha_dict[tarball]
# all that lousy work just to get a sha1 sum. okay. so.
if os.path.isfile(tarball_path):
pass
else:
# fetch the tarball...
print("Fetching the tarball for {0} architecture, please wait...".format(arch))
tarball_dl = urlopen(rlsdir + '/' + tarball)
with open(tarball_path, 'wb') as f:
f.write(tarball_dl.read())
tarball_dl.close()
tarball_hash = hashlib.sha1(open(tarball_path, 'rb').read()).hexdigest()
if tarball_hash != sha1:
exit("There was a failure fetching the tarball and the wrong version exists on the filesystem.\nPlease try again later.")
else:
# okay, so the sha1 matches. let's verify the signature.
# we don't want to futz with the user's normal gpg.
gpg = gnupg.GPG(gnupghome = dlpath + '/.gnupg')
input_data = gpg.gen_key_input(name_email = 'tempuser@nodomain.tld', passphrase = 'placeholder_passphrase')
key = gpg.gen_key(input_data)
keyid = '7F2D434B9741E8AC'
gpg.recv_keys('pgp.mit.edu', keyid)
gpg_sig = tarball + '.sig'
sig_dl = urlopen(rlsdir + '/' + gpg_sig)
with open(tarball_path + '.sig', 'wb+') as f:
f.write(sig_dl.read())
sig_dl.close()
sig = tarball_path + '.sig'
tarball_data = open(tarball_path, 'rb')
tarball_data_in = tarball_data.read()
gpg_verify = gpg.verify_data(sig, tarball_data_in)
tarball_data.close()
if not gpg_verify:
exit("There was a failure checking the signature of the release tarball. Please investigate.")
os.remove(sig)

return(tarball_path)

def unpackTarball(tarball_path, chrootdir):
# Make the dir if it doesn't exist
try:
os.makedirs(chrootdir)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
# Open and extract the tarball
tar = tarfile.open(tarball_path, 'r:gz')
tar.extractall(path = chrootdir)
tar.close()
return(True)

def buildChroot(arch, chrootdir, dlpath, extradir):
unpack_me = unpackTarball(downloadTarball(archChk(arch), dlpath), chrootdir)
if unpack_me:
pass
else:
exit("Something went wrong when trying to unpack the tarball.")

print("The download and extraction has completed. Now prepping the chroot environment with some additional changes.")
# build dict of lists of files and dirs from pre-build.d dir, do the same with arch-specific changes.
prebuild_overlay = {}
prebuild_arch_overlay = {}
for x in ['i686', 'x86_64']:
prebuild_arch_overlay[x] = {}
for y in ['files', 'dirs']:
prebuild_overlay[y] = []
prebuild_arch_overlay[x][y] = []
for path, dirs, files in os.walk(extradir + '/pre-build.d/'):
prebuild_overlay['dirs'].append(path + '/')
for file in files:
prebuild_overlay['files'].append(os.path.join(path, file))
for x in prebuild_overlay.keys():
prebuild_overlay[x][:] = [re.sub('^' + extradir + '/pre-build.d/', '', s) for s in prebuild_overlay[x]]
prebuild_overlay[x] = list(filter(None, prebuild_overlay[x]))
for y in prebuild_arch_overlay.keys():
prebuild_arch_overlay[y][x][:] = [i for i in prebuild_overlay[x] if i.startswith(y)]
prebuild_arch_overlay[y][x][:] = [re.sub('^' + y + '/', '', s) for s in prebuild_arch_overlay[y][x]]
prebuild_arch_overlay[y][x] = list(filter(None, prebuild_arch_overlay[y][x]))
prebuild_overlay[x][:] = [y for y in prebuild_overlay[x] if not y.startswith(('x86_64','i686'))]
prebuild_overlay['dirs'].remove('/')
# create the dir structure. these should almost definitely be owned by root.
for dir in prebuild_overlay['dirs']:
os.makedirs(chrootdir + '/' + dir, exist_ok = True)
os.chown(chrootdir + '/' + dir, 0, 0)
# and copy over the files. again, chown to root.
for file in prebuild_overlay['files']:
shutil.copy2(extradir + '/pre-build.d/' + file, chrootdir + '/' + file)
os.chown(chrootdir + '/' + file, 0, 0)
# do the same for arch-specific stuff.
for dir in prebuild_arch_overlay[arch]['dirs']:
os.makedirs(chrootdir + '/' + dir, exist_ok = True)
os.chown(chrootdir + '/' + dir, 0, 0)
for file in prebuild_arch_overlay[arch]['files']:
shutil.copy2(extradir + '/pre-build.d/' + arch + '/' + file, chrootdir + '/' + file)
os.chown(chrootdir + '/' + file, 0, 0)
return(chrootdir)

def prepChroot(templates_dir, chrootdir, bdisk, arch):
build = {}
# let's prep some variables to write out the version info.txt
# get the git tag and short commit hash
repo = git.Repo(bdisk['dir'])
refs = repo.git.describe(repo.head.commit).split('-')
build['ver'] = refs[0] + '-' + refs[2]
# and these should be passed in from the args, for the most part.
build['name'] = bdisk['name']
build['time'] = datetime.datetime.utcnow().strftime("%a %b %d %H:%M:%S UTC %Y")
build['host'] = bdisk['hostname']
build['user'] = os.environ['USER']
if os.environ.get('SUDO_USER'):
build['realuser'] = os.environ['SUDO_USER']
# and now that we have that dict, let's write out the VERSION_INFO.txt file.
env = jinja2.Environment(loader = jinja2.FileSystemLoader(templates_dir))
tpl = env.get_template('VERSION_INFO.txt.j2')
tpl_out = tpl.render(build = build)
with open(chrootdir + '/root/VERSION_INFO.txt', 'w+') as f:
f.write(tpl_out)
return(build)
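For context, a hedged sketch of driving prepChroot() and what the returned build dict carries; the bdisk values and paths below are illustrative, not taken from this commit.

# Hypothetical inputs; the keys mirror what prepChroot() actually reads.
bdisk = {
    'dir': '/opt/dev/bdisk',     # git checkout the version string is derived from
    'name': 'BDISK',             # placeholder project name
    'hostname': 'bdisk-build',   # placeholder build hostname
}
build = prepChroot('./extra/templates', '/var/tmp/chroot_x86_64', bdisk, 'x86_64')
# build now holds 'ver', 'name', 'time', 'host', 'user' (plus 'realuser' under sudo),
# and VERSION_INFO.txt has been rendered into the chroot's /root directory.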

View File

@@ -0,0 +1,14 @@
LANG=en_US.UTF-8
LC_CTYPE="en_US.UTF-8"
LC_NUMERIC="en_US.UTF-8"
LC_TIME="en_US.UTF-8"
LC_COLLATE="en_US.UTF-8"
LC_MONETARY="en_US.UTF-8"
LC_MESSAGES="en_US.UTF-8"
LC_PAPER="en_US.UTF-8"
LC_NAME="en_US.UTF-8"
LC_ADDRESS="en_US.UTF-8"
LC_TELEPHONE="en_US.UTF-8"
LC_MEASUREMENT="en_US.UTF-8"
LC_IDENTIFICATION="en_US.UTF-8"
LC_ALL=