From f9ebd247e8180cc8967ef30be298655e15bf4ed6 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Tue, 18 Feb 2014 20:45:10 +0100 Subject: [PATCH 01/22] Create taskgraph out of all tasks before filtering. Fixes #6 --- base/tasklist.py | 50 ++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 46 insertions(+), 4 deletions(-) diff --git a/base/tasklist.py b/base/tasklist.py index 2803d7a..216c466 100644 --- a/base/tasklist.py +++ b/base/tasklist.py @@ -31,15 +31,26 @@ class TaskList(object): def create_list(self): from common.phases import order + # Get a hold of all tasks + tasks = self.get_all_tasks() + # Make sure the taskset is a subset of all the tasks we have gathered + self.tasks.issubset(tasks) + # Create a graph over all tasks by creating a map of each tasks successors graph = {} - for task in self.tasks: + for task in tasks: + # Do a sanity check first self.check_ordering(task) successors = set() + # Add all successors mentioned in the task successors.update(task.successors) - successors.update(filter(lambda succ: task in succ.predecessors, self.tasks)) + # Add all tasks that mention this task as a predecessor + successors.update(filter(lambda succ: task in succ.predecessors, tasks)) + # Create a list of phases that succeed the phase of this task succeeding_phases = order[order.index(task.phase) + 1:] - successors.update(filter(lambda succ: succ.phase in succeeding_phases, self.tasks)) - graph[task] = filter(lambda succ: succ in self.tasks, successors) + # Add all tasks that occur in above mentioned succeeding phases + successors.update(filter(lambda succ: succ.phase in succeeding_phases, tasks)) + # Map the successors to the task + graph[task] = successors components = self.strongly_connected_components(graph) cycles_found = 0 @@ -52,10 +63,41 @@ class TaskList(object): 'consult the logfile for more information.'.format(cycles_found)) raise TaskListError(msg) + # Run a topological sort on the graph, returning an ordered list sorted_tasks = 
self.topological_sort(graph) + # Filter out any tasks not in the tasklist + # We want to maintain ordering, so we don't use set intersection + sorted_tasks = filter(lambda task: task in self.tasks, sorted_tasks) return sorted_tasks + def get_all_tasks(self): + # Get a generator that returns all classes in the package + classes = self.get_all_classes('..') + + # lambda function to check whether a class is a task (excluding the superclass Task) + def is_task(obj): + from task import Task + return issubclass(obj, Task) and obj is not Task + return filter(is_task, classes) # Only return classes that are tasks + + # Given a path, retrieve all the classes in it + def get_all_classes(self, path=None): + import pkgutil + import importlib + import inspect + + def walk_error(module): + raise Exception('Unable to inspect module `{module}\''.format(module=module)) + walker = pkgutil.walk_packages(path, '', walk_error) + for _, module_name, _ in walker: + module = importlib.import_module(module_name) + classes = inspect.getmembers(module, inspect.isclass) + for class_name, obj in classes: + # We only want classes that are defined in the module, and not imported ones + if obj.__module__ == module_name: + yield obj + def check_ordering(self, task): for successor in task.successors: if successor.phase > successor.phase: From cb011dabf3ab2459127ba99d387d7ac936974531 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Wed, 19 Feb 2014 22:14:17 +0100 Subject: [PATCH 02/22] Minor cleanup in network module --- common/tasks/network.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/common/tasks/network.py b/common/tasks/network.py index 8418a84..e499929 100644 --- a/common/tasks/network.py +++ b/common/tasks/network.py @@ -1,6 +1,6 @@ from base import Task from common import phases -import os.path +import os class RemoveDNSInfo(Task): @@ -9,10 +9,8 @@ class RemoveDNSInfo(Task): @classmethod def run(cls, info): - from os import remove - import os.path if 
os.path.isfile(os.path.join(info.root, 'etc/resolv.conf')): - remove(os.path.join(info.root, 'etc/resolv.conf')) + os.remove(os.path.join(info.root, 'etc/resolv.conf')) class RemoveHostname(Task): @@ -21,10 +19,8 @@ class RemoveHostname(Task): @classmethod def run(cls, info): - from os import remove - import os.path if os.path.isfile(os.path.join(info.root, 'etc/hostname')): - remove(os.path.join(info.root, 'etc/hostname')) + os.remove(os.path.join(info.root, 'etc/hostname')) class ConfigureNetworkIF(Task): From 47b12ac80772a11931e57cee7ea9d776ecf989bf Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Sun, 23 Feb 2014 16:11:10 +0100 Subject: [PATCH 03/22] Adjust README to point at the documentation. Remove outdated CONTRIBUTING.md --- CONTRIBUTING.md | 12 ------------ README.md | 43 ++++++++++++++++--------------------------- 2 files changed, 16 insertions(+), 39 deletions(-) delete mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index fb4a18a..0000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,12 +0,0 @@ -# Coding standards # -* Specify the full path when invoking a command. -* Use long options whenever possible, this makes the commands invoked a lot easier to understand. -* Use tabs for indentation and spaces for alignment. -* Max line length is 110 chars. -* Multiple assignments may be aligned. -* Follow PEP8 with the exception of the following rules - * E101: Indenting with tabs and aligning with spaces - * E221: Alignment of assignments - * E241: Alignment of assignments - * E501: The line length is 110 characters not 80 - * W191: We indent with tabs not spaces diff --git a/README.md b/README.md index cb64e9f..7d0d12d 100644 --- a/README.md +++ b/README.md @@ -1,31 +1,20 @@ bootstrap-vz =========================================== -bootstrap-vz is a fully automated bootstrapping tool for Debian. -It creates images for various virtualized platforms (at the moment: kvm, virtualbox, ec2). 
-The plugin architecture allows for heavy modification of standard behavior -(e.g. create a vagrant box, apply puppet manifests, run custom shell commands). +bootstrap-vz is a bootstrapping framework for Debian. +It is is specifically intended to bootstrap systems for virtualized environments. +It runs without any user intervention and generates ready-to-boot images for +[a number of virtualization platforms(http://andsens.github.io/bootstrap-vz/providers.html). +Its aim is to provide a reproducable bootstrapping process using manifests +as well as supporting a high degree of customizability through plugins.
+bootstrap-vz was coded from scratch in python once the bash scripts that were used in the +[build-debian-cloud](https://github.com/andsens/build-debian-cloud) bootstrapper reached their +limits. -At no time is the resulting image booted, meaning there are no latent logfiles -or bash_history files. - -The bootstrapper runs on a single json manifest file which contains all configurable -parameters. This allows you to recreate the image whenever you like so you can create -an updated version of an existing image or create the same image in multiple EC2 regions. - -Dependencies ------------- -You will need to run debian wheezy with **python 2.7** and **debootstrap** installed. -Other depencies include: -* qemu-utils -* parted -* grub2 -* euca2ools -* xfsprogs (If you want to use XFS as a filesystem) -Also the following python libraries are required: -* **boto** ([version 2.14.0 or higher](https://github.com/boto/boto)) -* **jsonschema** ([version 2.0.0](https://pypi.python.org/pypi/jsonschema), only available through pip) -* **termcolor** -* **fysom** - -Bootstrapping instance store AMIs requires **euca2ools** to be installed. +Documentation +------------- +The documentation for bootstrap-vz is available +at [andsens.github.io/bootstrap-vz](http://andsens.github.io/bootstrap-vz). +There, you can discover [what the dependencies](http://andsens.github.io/bootstrap-vz/#dependencies) +for a specific cloud provider are, [see the list of available plugins](http://andsens.github.io/bootstrap-vz/plugins.html) +and learn [how you create a manifest](http://andsens.github.io/bootstrap-vz/manifest.html). From 6e145f6aca47801ea8f9d849d4e8da27f5487b52 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Sun, 23 Feb 2014 17:52:05 +0100 Subject: [PATCH 04/22] Introduce format_command. 
Fixes #7 --- base/fs/partitionmaps/gpt.py | 6 ++++-- base/fs/partitionmaps/msdos.py | 8 ++++++-- base/fs/partitionmaps/none.py | 3 ++- base/fs/partitions/abstract.py | 23 ++++++++++++++++------- base/fs/partitions/base.py | 4 ++-- base/fs/partitions/gpt.py | 4 ++-- base/fs/partitions/gpt_swap.py | 2 +- base/fs/partitions/msdos_swap.py | 2 +- base/fs/partitions/unformatted.py | 2 +- base/manifest-schema.json | 7 ++++++- 10 files changed, 41 insertions(+), 20 deletions(-) diff --git a/base/fs/partitionmaps/gpt.py b/base/fs/partitionmaps/gpt.py index 2502288..12f3383 100644 --- a/base/fs/partitionmaps/gpt.py +++ b/base/fs/partitionmaps/gpt.py @@ -23,13 +23,15 @@ class GPTPartitionMap(AbstractPartitionMap): self.partitions.append(self.grub_boot) if 'boot' in data: - self.boot = GPTPartition(Bytes(data['boot']['size']), data['boot']['filesystem'], + self.boot = GPTPartition(Bytes(data['boot']['size']), + data['boot']['filesystem'], data['boot'].get('format_command', None), 'boot', last_partition()) self.partitions.append(self.boot) if 'swap' in data: self.swap = GPTSwapPartition(Bytes(data['swap']['size']), last_partition()) self.partitions.append(self.swap) - self.root = GPTPartition(Bytes(data['root']['size']), data['root']['filesystem'], + self.root = GPTPartition(Bytes(data['root']['size']), + data['root']['filesystem'], data['root'].get('format_command', None), 'root', last_partition()) self.partitions.append(self.root) diff --git a/base/fs/partitionmaps/msdos.py b/base/fs/partitionmaps/msdos.py index ab71978..988178a 100644 --- a/base/fs/partitionmaps/msdos.py +++ b/base/fs/partitionmaps/msdos.py @@ -14,12 +14,16 @@ class MSDOSPartitionMap(AbstractPartitionMap): return self.partitions[-1] if len(self.partitions) > 0 else None if 'boot' in data: - self.boot = MSDOSPartition(Bytes(data['boot']['size']), data['boot']['filesystem'], None) + self.boot = MSDOSPartition(Bytes(data['boot']['size']), + data['boot']['filesystem'], data['boot'].get('format_command', None), + 
last_partition()) self.partitions.append(self.boot) if 'swap' in data: self.swap = MSDOSSwapPartition(Bytes(data['swap']['size']), last_partition()) self.partitions.append(self.swap) - self.root = MSDOSPartition(Bytes(data['root']['size']), data['root']['filesystem'], last_partition()) + self.root = MSDOSPartition(Bytes(data['root']['size']), + data['root']['filesystem'], data['root'].get('format_command', None), + last_partition()) self.partitions.append(self.root) getattr(self, 'boot', self.root).flags.append('boot') diff --git a/base/fs/partitionmaps/none.py b/base/fs/partitionmaps/none.py index 8fc7650..550cbf5 100644 --- a/base/fs/partitionmaps/none.py +++ b/base/fs/partitionmaps/none.py @@ -5,7 +5,8 @@ class NoPartitions(object): def __init__(self, data, bootloader): from common.bytes import Bytes - self.root = SinglePartition(Bytes(data['root']['size']), data['root']['filesystem']) + self.root = SinglePartition(Bytes(data['root']['size']), + data['root']['filesystem'], data['root'].get('format_command', None)) self.partitions = [self.root] def is_blocking(self): diff --git a/base/fs/partitions/abstract.py b/base/fs/partitions/abstract.py index 5684561..f987bfb 100644 --- a/base/fs/partitions/abstract.py +++ b/base/fs/partitions/abstract.py @@ -36,11 +36,12 @@ class AbstractPartition(FSMProxy): log_check_call(['/bin/umount', self.mount_dir]) del self.mount_dir - def __init__(self, size, filesystem): - self.size = size - self.filesystem = filesystem - self.device_path = None - self.mounts = {} + def __init__(self, size, filesystem, format_command): + self.size = size + self.filesystem = filesystem + self.format_command = format_command + self.device_path = None + self.mounts = {} cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': {}} super(AbstractPartition, self).__init__(cfg) @@ -57,8 +58,16 @@ class AbstractPartition(FSMProxy): return self.get_start() + self.size def _before_format(self, e): - mkfs = 
'/sbin/mkfs.{fs}'.format(fs=self.filesystem) - log_check_call([mkfs, self.device_path]) + if self.format_command is None: + format_command = ['/sbin/mkfs.{fs}', '{device_path}'] + else: + format_command = self.format_command + variables = {'fs': self.filesystem, + 'device_path': self.device_path, + 'size': self.size, + } + command = map(lambda part: part.format(**variables), format_command) + log_check_call(command) def _before_mount(self, e): log_check_call(['/bin/mount', '--types', self.filesystem, self.device_path, e.destination]) diff --git a/base/fs/partitions/base.py b/base/fs/partitions/base.py index 216479e..78b7e74 100644 --- a/base/fs/partitions/base.py +++ b/base/fs/partitions/base.py @@ -14,12 +14,12 @@ class BasePartition(AbstractPartition): {'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'}, ] - def __init__(self, size, filesystem, previous): + def __init__(self, size, filesystem, format_command, previous): self.previous = previous from common.bytes import Bytes self.offset = Bytes(0) self.flags = [] - super(BasePartition, self).__init__(size, filesystem) + super(BasePartition, self).__init__(size, filesystem, format_command) def create(self, volume): self.fsm.create(volume=volume) diff --git a/base/fs/partitions/gpt.py b/base/fs/partitions/gpt.py index aed7f2f..52996ab 100644 --- a/base/fs/partitions/gpt.py +++ b/base/fs/partitions/gpt.py @@ -4,9 +4,9 @@ from base import BasePartition class GPTPartition(BasePartition): - def __init__(self, size, filesystem, name, previous): + def __init__(self, size, filesystem, format_command, name, previous): self.name = name - super(GPTPartition, self).__init__(size, filesystem, previous) + super(GPTPartition, self).__init__(size, filesystem, format_command, previous) def _before_create(self, e): super(GPTPartition, self)._before_create(e) diff --git a/base/fs/partitions/gpt_swap.py b/base/fs/partitions/gpt_swap.py index 0217770..4af9245 100644 --- a/base/fs/partitions/gpt_swap.py +++ 
b/base/fs/partitions/gpt_swap.py @@ -5,7 +5,7 @@ from gpt import GPTPartition class GPTSwapPartition(GPTPartition): def __init__(self, size, previous): - super(GPTSwapPartition, self).__init__(size, 'swap', 'swap', previous) + super(GPTSwapPartition, self).__init__(size, 'swap', None, 'swap', previous) def _before_format(self, e): log_check_call(['/sbin/mkswap', self.device_path]) diff --git a/base/fs/partitions/msdos_swap.py b/base/fs/partitions/msdos_swap.py index 4b1d1dc..f4e0339 100644 --- a/base/fs/partitions/msdos_swap.py +++ b/base/fs/partitions/msdos_swap.py @@ -5,7 +5,7 @@ from msdos import MSDOSPartition class MSDOSSwapPartition(MSDOSPartition): def __init__(self, size, previous): - super(MSDOSSwapPartition, self).__init__(size, 'swap', previous) + super(MSDOSSwapPartition, self).__init__(size, 'swap', None, previous) def _before_format(self, e): log_check_call(['/sbin/mkswap', self.device_path]) diff --git a/base/fs/partitions/unformatted.py b/base/fs/partitions/unformatted.py index bbbc357..bb8e343 100644 --- a/base/fs/partitions/unformatted.py +++ b/base/fs/partitions/unformatted.py @@ -9,4 +9,4 @@ class UnformattedPartition(BasePartition): ] def __init__(self, size, previous): - super(UnformattedPartition, self).__init__(size, None, previous) + super(UnformattedPartition, self).__init__(size, None, None, previous) diff --git a/base/manifest-schema.json b/base/manifest-schema.json index 17dc233..f4d3d43 100644 --- a/base/manifest-schema.json +++ b/base/manifest-schema.json @@ -141,7 +141,12 @@ "type": "object", "properties": { "size": { "$ref": "#/definitions/bytes" }, - "filesystem": { "enum": ["ext2", "ext3", "ext4", "xfs"] } + "filesystem": { "enum": ["ext2", "ext3", "ext4", "xfs"] }, + "format_command": { + "type": "array", + "items": {"type": "string"}, + "minItems": 1 + } }, "required": ["size", "filesystem"] } From ed4fa5e833e3ca394b9e3947faa9fbe5fba52e06 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Sun, 23 Feb 2014 19:46:50 +0100 
Subject: [PATCH 05/22] Use --arch in euca-bundle-image. Fixes #23 --- providers/ec2/tasks/ami.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/providers/ec2/tasks/ami.py b/providers/ec2/tasks/ami.py index 9e0ff0b..c5f5df4 100644 --- a/providers/ec2/tasks/ami.py +++ b/providers/ec2/tasks/ami.py @@ -38,8 +38,10 @@ class BundleImage(Task): def run(cls, info): bundle_name = 'bundle-{id}'.format(id=info.run_id) info.bundle_path = os.path.join(info.workspace, bundle_name) + arch = {'i386': 'i386', 'amd64': 'x86_64'}.get(info.manifest.system['architecture']) log_check_call(['/usr/bin/euca-bundle-image', '--image', info.volume.image_path, + '--arch', arch, '--user', info.credentials['user-id'], '--privatekey', info.credentials['private-key'], '--cert', info.credentials['certificate'], From 46dbe9e8c6dce3917e282bc7b53c1a389ce268b4 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Sun, 23 Feb 2014 20:14:23 +0100 Subject: [PATCH 06/22] Move AKI info into JSON file. Fixes #13 Also introduced config_get() to retrieve a specific value from these JSON files. 
Comments are now supported in those files as well --- base/manifest.py | 5 ++- {base => common}/minify_json.py | 0 common/tasks/network.py | 10 ++--- common/tools.py | 14 ++++++ providers/ec2/tasks/ami-akis.json | 34 ++++++++++++++ providers/ec2/tasks/ami.py | 55 ++--------------------- providers/ec2/tasks/packages-kernels.json | 1 + providers/ec2/tasks/packages.py | 11 +++-- 8 files changed, 66 insertions(+), 64 deletions(-) rename {base => common}/minify_json.py (100%) create mode 100644 providers/ec2/tasks/ami-akis.json diff --git a/base/manifest.py b/base/manifest.py index a51f826..632c28e 100644 --- a/base/manifest.py +++ b/base/manifest.py @@ -1,3 +1,4 @@ +from common.tools import load_json import logging log = logging.getLogger(__name__) @@ -10,7 +11,7 @@ class Manifest(object): self.parse() def load(self): - self.data = self.load_json(self.path) + self.data = load_json(self.path) provider_modname = 'providers.{provider}'.format(provider=self.data['provider']) log.debug('Loading provider `{modname}\''.format(modname=provider_modname)) self.modules = {'provider': __import__(provider_modname, fromlist=['providers']), @@ -55,7 +56,7 @@ class Manifest(object): def schema_validator(self, data, schema_path): import jsonschema - schema = self.load_json(schema_path) + schema = load_json(schema_path) try: jsonschema.validate(data, schema) except jsonschema.ValidationError as e: diff --git a/base/minify_json.py b/common/minify_json.py similarity index 100% rename from base/minify_json.py rename to common/minify_json.py diff --git a/common/tasks/network.py b/common/tasks/network.py index e499929..3ebdf44 100644 --- a/common/tasks/network.py +++ b/common/tasks/network.py @@ -29,10 +29,10 @@ class ConfigureNetworkIF(Task): @classmethod def run(cls, info): + network_config_path = os.path.join(os.path.dirname(__file__), 'network-configuration.json') + from common.tools import config_get + if_config = config_get(network_config_path, [info.manifest.system['release']]) + 
interfaces_path = os.path.join(info.root, 'etc/network/interfaces') - if_config = [] - with open('common/tasks/network-configuration.json') as stream: - import json - if_config = json.loads(stream.read()) with open(interfaces_path, 'a') as interfaces: - interfaces.write('\n'.join(if_config.get(info.manifest.system['release'])) + '\n') + interfaces.write('\n'.join(if_config) + '\n') diff --git a/common/tools.py b/common/tools.py index 2833b94..dc56637 100644 --- a/common/tools.py +++ b/common/tools.py @@ -56,3 +56,17 @@ def sed_i(file_path, pattern, subst): import re for line in fileinput.input(files=file_path, inplace=True): print re.sub(pattern, subst, line), + + +def load_json(path): + import json + from minify_json import json_minify + with open(path) as stream: + return json.loads(json_minify(stream.read(), False)) + + +def config_get(path, config_path): + config = load_json(path) + for key in config_path: + config = config.get(key) + return config diff --git a/providers/ec2/tasks/ami-akis.json b/providers/ec2/tasks/ami-akis.json new file mode 100644 index 0000000..79e1b66 --- /dev/null +++ b/providers/ec2/tasks/ami-akis.json @@ -0,0 +1,34 @@ +// This is a mapping of EC2 regions to processor architectures to Amazon Kernel Images +// Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html#AmazonKernelImageIDs +{ +"ap-northeast-1": // Asia Pacific (Tokyo) Region + {"i386": "aki-136bf512", // pv-grub-hd0_1.04-i386.gz + "amd64": "aki-176bf516"}, // pv-grub-hd0_1.04-x86_64.gz +"ap-southeast-1": // Asia Pacific (Singapore) Region + {"i386": "aki-ae3973fc", // pv-grub-hd0_1.04-i386.gz + "amd64": "aki-503e7402"}, // pv-grub-hd0_1.04-x86_64.gz +"ap-southeast-2": // Asia Pacific (Sydney) Region + {"i386": "aki-cd62fff7", // pv-grub-hd0_1.04-i386.gz + "amd64": "aki-c362fff9"}, // pv-grub-hd0_1.04-x86_64.gz +"eu-west-1": // EU (Ireland) Region + {"i386": "aki-68a3451f", // pv-grub-hd0_1.04-i386.gz + "amd64": "aki-52a34525"}, // 
pv-grub-hd0_1.04-x86_64.gz +"sa-east-1": // South America (Sao Paulo) Region + {"i386": "aki-5b53f446", // pv-grub-hd0_1.04-i386.gz + "amd64": "aki-5553f448"}, // pv-grub-hd0_1.04-x86_64.gz +"us-east-1": // US East (Northern Virginia) Region + {"i386": "aki-8f9dcae6", // pv-grub-hd0_1.04-i386.gz + "amd64": "aki-919dcaf8"}, // pv-grub-hd0_1.04-x86_64.gz +"us-gov-west-1": // AWS GovCloud (US) + {"i386": "aki-1fe98d3c", // pv-grub-hd0_1.04-i386.gz + "amd64": "aki-1de98d3e"}, // pv-grub-hd0_1.04-x86_64.gz +"us-west-1": // US West (Northern California) Region + {"i386": "aki-8e0531cb", // pv-grub-hd0_1.04-i386.gz + "amd64": "aki-880531cd"}, // pv-grub-hd0_1.04-x86_64.gz +"us-west-2": // US West (Oregon) Region + {"i386": "aki-f08f11c0", // pv-grub-hd0_1.04-i386.gz + "amd64": "aki-fc8f11cc"}, // pv-grub-hd0_1.04-x86_64.gz +"cn-north-1":// China North (Beijing) Region + {"i386": "aki-908f1da9", // pv-grub-hd0_1.04-i386.gz + "amd64": "aki-9e8f1da7"} // pv-grub-hd0_1.04-x86_64.gz +} diff --git a/providers/ec2/tasks/ami.py b/providers/ec2/tasks/ami.py index c5f5df4..28de349 100644 --- a/providers/ec2/tasks/ami.py +++ b/providers/ec2/tasks/ami.py @@ -92,48 +92,6 @@ class RegisterAMI(Task): phase = phases.image_registration predecessors = [Snapshot, UploadImage] - # Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html#AmazonKernelImageIDs - kernel_mapping = {'ap-northeast-1': # Asia Pacific (Tokyo) Region - {'i386': 'aki-136bf512', # pv-grub-hd0_1.04-i386.gz - 'amd64': 'aki-176bf516' # pv-grub-hd0_1.04-x86_64.gz - }, - 'ap-southeast-1': # Asia Pacific (Singapore) Region - {'i386': 'aki-ae3973fc', # pv-grub-hd0_1.04-i386.gz - 'amd64': 'aki-503e7402' # pv-grub-hd0_1.04-x86_64.gz - }, - 'ap-southeast-2': # Asia Pacific (Sydney) Region - {'i386': 'aki-cd62fff7', # pv-grub-hd0_1.04-i386.gz - 'amd64': 'aki-c362fff9' # pv-grub-hd0_1.04-x86_64.gz - }, - 'eu-west-1': # EU (Ireland) Region - {'i386': 'aki-68a3451f', # pv-grub-hd0_1.04-i386.gz - 'amd64': 
'aki-52a34525' # pv-grub-hd0_1.04-x86_64.gz - }, - 'sa-east-1': # South America (Sao Paulo) Region - {'i386': 'aki-5b53f446', # pv-grub-hd0_1.04-i386.gz - 'amd64': 'aki-5553f448' # pv-grub-hd0_1.04-x86_64.gz - }, - 'us-east-1': # US East (Northern Virginia) Region - {'i386': 'aki-8f9dcae6', # pv-grub-hd0_1.04-i386.gz - 'amd64': 'aki-919dcaf8' # pv-grub-hd0_1.04-x86_64.gz - }, - 'us-gov-west-1': # AWS GovCloud (US) - {'i386': 'aki-1fe98d3c', # pv-grub-hd0_1.04-i386.gz - 'amd64': 'aki-1de98d3e' # pv-grub-hd0_1.04-x86_64.gz - }, - 'us-west-1': # US West (Northern California) Region - {'i386': 'aki-8e0531cb', # pv-grub-hd0_1.04-i386.gz - 'amd64': 'aki-880531cd' # pv-grub-hd0_1.04-x86_64.gz - }, - 'us-west-2': # US West (Oregon) Region - {'i386': 'aki-f08f11c0', # pv-grub-hd0_1.04-i386.gz - 'amd64': 'aki-fc8f11cc' # pv-grub-hd0_1.04-x86_64.gz - }, - 'cn-north-1': # China North (Beijing) Region - {'i386': 'aki-908f1da9', # pv-grub-hd0_1.04-i386.gz - 'amd64': 'aki-9e8f1da7' # pv-grub-hd0_1.04-x86_64.gz - } - } @classmethod def run(cls, info): registration_params = {'name': info.ami_name, @@ -142,17 +100,11 @@ class RegisterAMI(Task): 'amd64': 'x86_64'}.get(info.manifest.system['architecture']) if info.manifest.volume['backing'] == 's3': - grub_boot_device = 'hd0' registration_params['image_location'] = info.manifest.manifest_location else: root_dev_name = {'pvm': '/dev/sda', 'hvm': '/dev/xvda'}.get(info.manifest.data['virtualization']) registration_params['root_device_name'] = root_dev_name - from base.fs.partitionmaps.none import NoPartitions - if isinstance(info.volume.partition_map, NoPartitions): - grub_boot_device = 'hd0' - else: - grub_boot_device = 'hd00' from boto.ec2.blockdevicemapping import BlockDeviceType from boto.ec2.blockdevicemapping import BlockDeviceMapping @@ -165,8 +117,9 @@ class RegisterAMI(Task): registration_params['virtualization_type'] = 'hvm' else: registration_params['virtualization_type'] = 'paravirtual' - registration_params['kernel_id'] = 
(cls.kernel_mapping - .get(info.host['region']) - .get(info.manifest.system['architecture'])) + akis_path = os.path.join(os.path.dirname(__file__), 'akis.json') + from common.tools import config_get + registration_params['kernel_id'] = config_get(akis_path, [info.host['region'], + info.manifest.system['architecture']]) info.image = info.connection.register_image(**registration_params) diff --git a/providers/ec2/tasks/packages-kernels.json b/providers/ec2/tasks/packages-kernels.json index 3c9e39f..6bf1e1c 100644 --- a/providers/ec2/tasks/packages-kernels.json +++ b/providers/ec2/tasks/packages-kernels.json @@ -1,4 +1,5 @@ { + // In squeeze, we need a special kernel flavor for xen "squeeze": { "amd64": "linux-image-xen-amd64", "i386" : "linux-image-xen-686" }, diff --git a/providers/ec2/tasks/packages.py b/providers/ec2/tasks/packages.py index 242457a..ebd49ef 100644 --- a/providers/ec2/tasks/packages.py +++ b/providers/ec2/tasks/packages.py @@ -17,10 +17,9 @@ class DefaultPackages(Task): info.exclude_packages.add('isc-dhcp-client') info.exclude_packages.add('isc-dhcp-common') - # In squeeze, we need a special kernel flavor for xen - kernels = {} - with open('providers/ec2/tasks/packages-kernels.json') as stream: - import json - kernels = json.loads(stream.read()) - kernel_package = kernels.get(info.manifest.system['release']).get(info.manifest.system['architecture']) + import os.path + kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.json') + from common.tools import config_get + kernel_package = config_get(kernel_packages_path, [info.manifest.system['release'], + info.manifest.system['architecture']]) info.packages.add(kernel_package) From ff968c38690e43bf07bcf9cd6c013abda5ebb6eb Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Sun, 23 Feb 2014 20:27:41 +0100 Subject: [PATCH 07/22] PEP8 compliance --- common/bytes.py | 6 +++--- common/tasks/apt.py | 8 ++++---- providers/kvm/tasks/virtio.py | 2 +- 3 files changed, 8 insertions(+), 
8 deletions(-) diff --git a/common/bytes.py b/common/bytes.py index e6f2b88..0a2aab2 100644 --- a/common/bytes.py +++ b/common/bytes.py @@ -4,9 +4,9 @@ class Bytes(object): units = {'B': 1, 'KiB': 1024, - 'MiB': 1024*1024, - 'GiB': 1024*1024*1024, - 'TiB': 1024*1024*1024*1024, + 'MiB': 1024 * 1024, + 'GiB': 1024 * 1024 * 1024, + 'TiB': 1024 * 1024 * 1024 * 1024, } def __init__(self, qty): diff --git a/common/tasks/apt.py b/common/tasks/apt.py index b0a2f75..d39d0bf 100644 --- a/common/tasks/apt.py +++ b/common/tasks/apt.py @@ -26,11 +26,11 @@ class AddDefaultSources(Task): sections = 'main' if 'sections' in info.manifest.system: sections = ' '.join(info.manifest.system['sections']) - info.source_lists.add('main', 'deb {apt_mirror} {system.release} '+sections) - info.source_lists.add('main', 'deb-src {apt_mirror} {system.release} '+sections) + info.source_lists.add('main', 'deb {apt_mirror} {system.release} ' + sections) + info.source_lists.add('main', 'deb-src {apt_mirror} {system.release} ' + sections) if info.manifest.system['release'] not in {'testing', 'unstable'}: - info.source_lists.add('main', 'deb {apt_mirror} {system.release}-updates '+sections) - info.source_lists.add('main', 'deb-src {apt_mirror} {system.release}-updates '+sections) + info.source_lists.add('main', 'deb {apt_mirror} {system.release}-updates ' + sections) + info.source_lists.add('main', 'deb-src {apt_mirror} {system.release}-updates ' + sections) class InstallTrustedKeys(Task): diff --git a/providers/kvm/tasks/virtio.py b/providers/kvm/tasks/virtio.py index a3d2370..06bb603 100644 --- a/providers/kvm/tasks/virtio.py +++ b/providers/kvm/tasks/virtio.py @@ -13,4 +13,4 @@ class VirtIO(Task): with open(modules, "a") as modules_file: modules_file.write("\n") for module in info.manifest.bootstrapper.get('virtio', []): - modules_file.write(module+"\n") + modules_file.write(module + "\n") From 6c18bca05aec5c844fea7fe3067fca91cffa9bec Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Sun, 23 Feb 
2014 20:48:42 +0100 Subject: [PATCH 08/22] Move virtio module list into "system". Fixes #5 --- manifests/kvm-virtio.manifest.json | 16 ++++++++-------- providers/kvm/manifest-schema.json | 17 +++++++++-------- providers/kvm/tasks/virtio.py | 2 +- 3 files changed, 18 insertions(+), 17 deletions(-) diff --git a/manifests/kvm-virtio.manifest.json b/manifests/kvm-virtio.manifest.json index 689ad4a..01452f9 100644 --- a/manifests/kvm-virtio.manifest.json +++ b/manifests/kvm-virtio.manifest.json @@ -2,20 +2,20 @@ "provider": "kvm", "bootstrapper": { "workspace": "/target", - "mirror": "http://ftp.fr.debian.org/debian/", - "virtio" : [ "virtio_pci", "virtio_blk" ] + "mirror": "http://ftp.fr.debian.org/debian/" }, "image": { "name": "debian-{system.release}-{system.architecture}-{%y}{%m}{%d}", "description": "Debian {system.release} {system.architecture}" }, "system": { - "release": "wheezy", - "architecture": "amd64", - "bootloader": "grub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" + "release": "wheezy", + "architecture": "amd64", + "bootloader": "grub", + "timezone": "UTC", + "locale": "en_US", + "charmap": "UTF-8", + "virtio_modules": [ "virtio_pci", "virtio_blk" ] }, "packages": {}, "volume": { diff --git a/providers/kvm/manifest-schema.json b/providers/kvm/manifest-schema.json index 12937c8..3586e4e 100644 --- a/providers/kvm/manifest-schema.json +++ b/providers/kvm/manifest-schema.json @@ -3,21 +3,22 @@ "title": "KVM manifest", "type": "object", "properties": { - "bootstrapper": { + "system": { "type": "object", "properties": { "virtio": { "type": "array", "items": { - "type": "string" + "type": "string", + "enum": ["virtio", + "virtio_pci", + "virtio_balloon", + "virtio_blk", + "virtio_net", + "virtio_ring"] }, "minItems": 1 - } - } - }, - "system": { - "type": "object", - "properties": { + }, "bootloader": { "type": "string", "enum": ["grub", "extlinux"] diff --git a/providers/kvm/tasks/virtio.py b/providers/kvm/tasks/virtio.py index 
06bb603..96c05cb 100644 --- a/providers/kvm/tasks/virtio.py +++ b/providers/kvm/tasks/virtio.py @@ -12,5 +12,5 @@ class VirtIO(Task): modules = os.path.join(info.root, '/etc/initramfs-tools/modules') with open(modules, "a") as modules_file: modules_file.write("\n") - for module in info.manifest.bootstrapper.get('virtio', []): + for module in info.manifest.system.get('virtio', []): modules_file.write(module + "\n") From 7a4721bd705ed45a37ae4b4fdcf6c57d969d322e Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Sun, 23 Feb 2014 20:53:58 +0100 Subject: [PATCH 09/22] Make 'packages' optional --- base/manifest-schema.json | 5 ++--- base/manifest.py | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/base/manifest-schema.json b/base/manifest-schema.json index f4d3d43..5be723d 100644 --- a/base/manifest-schema.json +++ b/base/manifest-schema.json @@ -99,7 +99,7 @@ "additionalProperties": false } }, - "required": ["provider", "bootstrapper", "image", "volume", "system"], + "required": ["provider", "bootstrapper", "system", "volume"], "definitions": { "path": { "type": "string", @@ -150,6 +150,5 @@ }, "required": ["size", "filesystem"] } - }, - "required": ["provider", "bootstrapper", "system", "packages", "volume"] + } } diff --git a/base/manifest.py b/base/manifest.py index 632c28e..a8bd61e 100644 --- a/base/manifest.py +++ b/base/manifest.py @@ -45,7 +45,7 @@ class Manifest(object): self.image = self.data['image'] self.volume = self.data['volume'] self.system = self.data['system'] - self.packages = self.data['packages'] + self.packages = self.data['packages'] if 'packages' in self.data else {} self.plugins = self.data['plugins'] if 'plugins' in self.data else {} def load_json(self, path): From 739e22e929a440a439ef8824893c9b5ada8ac0eb Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Sun, 23 Feb 2014 21:14:21 +0100 Subject: [PATCH 10/22] Translate release names into codenames. 
Fixes #15 --- base/bootstrapinfo.py | 4 ++++ base/release-codenames.json | 22 ++++++++++++++++++ common/tasks/network-configuration.json | 27 +++++++++-------------- common/tasks/network.py | 2 +- providers/ec2/tasks/packages-kernels.json | 26 +++++++++------------- providers/ec2/tasks/packages.py | 2 +- 6 files changed, 48 insertions(+), 35 deletions(-) create mode 100644 base/release-codenames.json diff --git a/base/bootstrapinfo.py b/base/bootstrapinfo.py index 2b4bcec..9dfe5c4 100644 --- a/base/bootstrapinfo.py +++ b/base/bootstrapinfo.py @@ -16,6 +16,10 @@ class BootstrapInformation(object): self.apt_mirror = self.manifest.packages.get('mirror', 'http://http.debian.net/debian') + release_codenames_path = os.path.join(os.path.dirname(__file__), 'release-codenames.json') + from common.tools import config_get + self.release_codename = config_get(release_codenames_path, [self.manifest.system['release']]) + class DictClass(dict): def __getattr__(self, name): return self[name] diff --git a/base/release-codenames.json b/base/release-codenames.json new file mode 100644 index 0000000..cac8692 --- /dev/null +++ b/base/release-codenames.json @@ -0,0 +1,22 @@ +{ // This is a mapping of Debian release names to their respective codenames + "unstable": "sid", + "testing": "jessie", + "stable": "wheezy", + "oldstable": "squeeze", + + "jessie": "jessie", + "wheezy": "wheezy", + "squeeze": "squeeze", + + // The following release names are not supported, but included of completeness sake + "lenny": "lenny", + "etch": "etch", + "sarge": "sarge", + "woody": "woody", + "potato": "potato", + "slink": "slink", + "hamm": "hamm", + "bo": "bo", + "rex": "rex", + "buzz": "buzz" +} diff --git a/common/tasks/network-configuration.json b/common/tasks/network-configuration.json index 5a3c533..4e5dce7 100644 --- a/common/tasks/network-configuration.json +++ b/common/tasks/network-configuration.json @@ -1,19 +1,12 @@ +// This is a mapping of Debian release codenames to NIC configurations +// 
Every item in an array is a line { - "squeeze": [ - "auto lo", - "iface lo inet loopback", - "auto eth0", - "iface eth0 inet dhcp" ], - "wheezy": [ - "auto eth0", - "iface eth0 inet dhcp" ], - "jessie": [ - "auto eth0", - "iface eth0 inet dhcp" ], - "testing": [ - "auto eth0", - "iface eth0 inet dhcp" ], - "unstable": [ - "auto eth0", - "iface eth0 inet dhcp" ] +"squeeze": ["auto lo", + "iface lo inet loopback", + "auto eth0", + "iface eth0 inet dhcp"], +"wheezy": ["auto eth0", + "iface eth0 inet dhcp"], +"jessie": ["auto eth0", + "iface eth0 inet dhcp"] } diff --git a/common/tasks/network.py b/common/tasks/network.py index 3ebdf44..5439718 100644 --- a/common/tasks/network.py +++ b/common/tasks/network.py @@ -31,7 +31,7 @@ class ConfigureNetworkIF(Task): def run(cls, info): network_config_path = os.path.join(os.path.dirname(__file__), 'network-configuration.json') from common.tools import config_get - if_config = config_get(network_config_path, [info.manifest.system['release']]) + if_config = config_get(network_config_path, [info.release_codename]) interfaces_path = os.path.join(info.root, 'etc/network/interfaces') with open(interfaces_path, 'a') as interfaces: diff --git a/providers/ec2/tasks/packages-kernels.json b/providers/ec2/tasks/packages-kernels.json index 6bf1e1c..6d33632 100644 --- a/providers/ec2/tasks/packages-kernels.json +++ b/providers/ec2/tasks/packages-kernels.json @@ -1,18 +1,12 @@ +// This is a mapping of Debian release codenames to processor architectures to kernel packages { - // In squeeze, we need a special kernel flavor for xen - "squeeze": { - "amd64": "linux-image-xen-amd64", - "i386" : "linux-image-xen-686" }, - "wheezy": { - "amd64": "linux-image-amd64", - "i386" : "linux-image-686" }, - "jessie": { - "amd64": "linux-image-amd64", - "i386" : "linux-image-686" }, - "testing": { - "amd64": "linux-image-amd64", - "i386" : "linux-image-686" }, - "unstable": { - "amd64": "linux-image-amd64", - "i386" : "linux-image-686" } +"squeeze": // In 
squeeze, we need a special kernel flavor for xen + {"i386": "linux-image-xen-686", + "amd64": "linux-image-xen-amd64"}, +"wheezy": + {"i386": "linux-image-686", + "amd64": "linux-image-amd64"}, +"jessie": + {"i386": "linux-image-686", + "amd64": "linux-image-amd64"} } diff --git a/providers/ec2/tasks/packages.py b/providers/ec2/tasks/packages.py index ebd49ef..ba2787d 100644 --- a/providers/ec2/tasks/packages.py +++ b/providers/ec2/tasks/packages.py @@ -20,6 +20,6 @@ class DefaultPackages(Task): import os.path kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.json') from common.tools import config_get - kernel_package = config_get(kernel_packages_path, [info.manifest.system['release'], + kernel_package = config_get(kernel_packages_path, [info.release_codename, info.manifest.system['architecture']]) info.packages.add(kernel_package) From 5cff8f9b1d76990e35d20860635570df0300a300 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Sun, 23 Feb 2014 21:51:28 +0100 Subject: [PATCH 11/22] Use `type' to check if command available. 
Fixes #11 --- base/bootstrapinfo.py | 2 +- common/task_sets.py | 4 ++-- common/tasks/host.py | 40 +++++++++++++++++++------------------ common/tools.py | 7 ++++--- providers/ec2/__init__.py | 2 +- providers/ec2/tasks/host.py | 9 +++++---- 6 files changed, 34 insertions(+), 30 deletions(-) diff --git a/base/bootstrapinfo.py b/base/bootstrapinfo.py index 9dfe5c4..bcaf881 100644 --- a/base/bootstrapinfo.py +++ b/base/bootstrapinfo.py @@ -55,6 +55,6 @@ class BootstrapInformation(object): self.include_packages = set() self.exclude_packages = set() - self.host_dependencies = set() + self.host_dependencies = {} self.initd = {'install': {}, 'disable': []} diff --git a/common/task_sets.py b/common/task_sets.py index d0afec4..558ebb8 100644 --- a/common/task_sets.py +++ b/common/task_sets.py @@ -12,8 +12,8 @@ from common.tasks import security from common.tasks import locale base_set = [workspace.CreateWorkspace, - host.HostDependencies, - host.CheckHostDependencies, + host.AddExternalCommands, + host.CheckExternalCommands, bootstrap.Bootstrap, workspace.DeleteWorkspace, ] diff --git a/common/tasks/host.py b/common/tasks/host.py index 7bf79dd..fbedaf3 100644 --- a/common/tasks/host.py +++ b/common/tasks/host.py @@ -3,47 +3,49 @@ from common import phases from common.exceptions import TaskError -class HostDependencies(Task): - description = 'Determining required host dependencies' +class AddExternalCommands(Task): + description = 'Determining which external commands are required' phase = phases.preparation @classmethod def run(cls, info): - info.host_dependencies.add('debootstrap') + info.host_dependencies['debootstrap'] = 'debootstrap' from common.fs.loopbackvolume import LoopbackVolume if isinstance(info.volume, LoopbackVolume): - info.host_dependencies.add('qemu-utils') + info.host_dependencies['qemu-img'] = 'qemu-utils' + info.host_dependencies['losetup'] = 'mount' + from common.fs.qemuvolume import QEMUVolume + if isinstance(info.volume, QEMUVolume): + 
info.host_dependencies['losetup'] = 'qemu-nbd' if 'xfs' in (p.filesystem for p in info.volume.partition_map.partitions): - info.host_dependencies.add('xfsprogs') + info.host_dependencies['mkfs.xfs'] = 'xfsprogs' from base.fs.partitionmaps.none import NoPartitions if not isinstance(info.volume.partition_map, NoPartitions): - info.host_dependencies.update(['parted', 'kpartx']) + info.host_dependencies['parted'] = 'parted' + info.host_dependencies['kpartx'] = 'kpartx' -class CheckHostDependencies(Task): - description = 'Checking installed host packages' +class CheckExternalCommands(Task): + description = 'Checking availability of external commands' phase = phases.preparation - predecessors = [HostDependencies] + predecessors = [AddExternalCommands] @classmethod def run(cls, info): from common.tools import log_check_call from subprocess import CalledProcessError missing_packages = [] - for package in info.host_dependencies: + for command, package in info.host_dependencies.items(): try: - import os.path - if os.path.isfile('/usr/bin/dpkg-query'): - log_check_call(['/usr/bin/dpkg-query', '-s', package]) + log_check_call(['type ' + command], shell=True) except CalledProcessError: - missing_packages.append(package) + msg = ('The command `{command}\' is not available, ' + 'it is located in the package `{package}\'.' 
+ .format(command=command, package=package)) + missing_packages.append(msg) if len(missing_packages) > 0: - pkgs = '\', `'.join(missing_packages) - if len(missing_packages) > 1: - msg = "The packages `{packages}\' are not installed".format(packages=pkgs) - else: - msg = "The package `{packages}\' is not installed".format(packages=pkgs) + msg = '\n'.join(missing_packages) raise TaskError(msg) diff --git a/common/tools.py b/common/tools.py index dc56637..6b39db5 100644 --- a/common/tools.py +++ b/common/tools.py @@ -1,14 +1,14 @@ -def log_check_call(command, stdin=None, env=None): - status, stdout, stderr = log_call(command, stdin, env) +def log_check_call(command, stdin=None, env=None, shell=False): + status, stdout, stderr = log_call(command, stdin, env, shell) if status != 0: from subprocess import CalledProcessError raise CalledProcessError(status, ' '.join(command), '\n'.join(stderr)) return stdout -def log_call(command, stdin=None, env=None): +def log_call(command, stdin=None, env=None, shell=False): import subprocess import select @@ -22,6 +22,7 @@ def log_call(command, stdin=None, env=None): popen_args = {'args': command, 'env': env, + 'shell': shell, 'stdin': subprocess.PIPE, 'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE, } diff --git a/providers/ec2/__init__.py b/providers/ec2/__init__.py index 5038e8d..eee1866 100644 --- a/providers/ec2/__init__.py +++ b/providers/ec2/__init__.py @@ -61,7 +61,7 @@ def resolve_tasks(taskset, manifest): if manifest.volume['partitions']['type'] != 'none': taskset.update(common.task_sets.partitioning_set) - taskset.update([tasks.host.HostDependencies, + taskset.update([tasks.host.AddExternalCommands, tasks.packages.DefaultPackages, tasks.connection.GetCredentials, tasks.host.GetInfo, diff --git a/providers/ec2/tasks/host.py b/providers/ec2/tasks/host.py index 661f01f..355e576 100644 --- a/providers/ec2/tasks/host.py +++ b/providers/ec2/tasks/host.py @@ -3,15 +3,16 @@ from common import phases from common.tasks import 
host -class HostDependencies(Task): - description = 'Adding required host packages for EC2 bootstrapping' +class AddExternalCommands(Task): + description = 'Determining required external commands for EC2 bootstrapping' phase = phases.preparation - successors = [host.CheckHostDependencies] + successors = [host.CheckExternalCommands] @classmethod def run(cls, info): if info.manifest.volume['backing'] == 's3': - info.host_dependencies.add('euca2ools') + info.host_dependencies['euca-bundle-image'] = 'euca2ools' + info.host_dependencies['euca-upload-bundle'] = 'euca2ools' class GetInfo(Task): From 0ef1d3ab695870dd8317fba14d83990f8f14182a Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Sun, 23 Feb 2014 22:03:13 +0100 Subject: [PATCH 12/22] Split up RequiredCommands task. The requirements are now located in the appropriate modules --- common/task_sets.py | 6 ++++-- common/tasks/bootstrap.py | 11 +++++++++++ common/tasks/filesystem.py | 14 +++++++++++++- common/tasks/host.py | 26 -------------------------- common/tasks/loopback.py | 17 +++++++++++++++++ common/tasks/partitioning.py | 14 ++++++++++++++ providers/ec2/__init__.py | 3 ++- 7 files changed, 61 insertions(+), 30 deletions(-) diff --git a/common/task_sets.py b/common/task_sets.py index 558ebb8..8af0ae7 100644 --- a/common/task_sets.py +++ b/common/task_sets.py @@ -12,7 +12,7 @@ from common.tasks import security from common.tasks import locale base_set = [workspace.CreateWorkspace, - host.AddExternalCommands, + bootstrap.AddRequiredCommands, host.CheckExternalCommands, bootstrap.Bootstrap, workspace.DeleteWorkspace, @@ -20,11 +20,13 @@ base_set = [workspace.CreateWorkspace, volume_set = [volume.Attach, volume.Detach, + filesystem.AddRequiredCommands, filesystem.Format, filesystem.FStab, ] -partitioning_set = [partitioning.PartitionVolume, +partitioning_set = [partitioning.AddRequiredCommands, + partitioning.PartitionVolume, partitioning.MapPartitions, partitioning.UnmapPartitions, ] diff --git 
a/common/tasks/bootstrap.py b/common/tasks/bootstrap.py index 8cb8d31..42d89ba 100644 --- a/common/tasks/bootstrap.py +++ b/common/tasks/bootstrap.py @@ -1,10 +1,21 @@ from base import Task from common import phases from common.exceptions import TaskError +import host import logging log = logging.getLogger(__name__) +class AddRequiredCommands(Task): + description = 'Adding commands required bootstrapping Debian' + phase = phases.preparation + successors = [host.CheckExternalCommands] + + @classmethod + def run(cls, info): + info.host_dependencies['debootstrap'] = 'debootstrap' + + def get_bootstrap_args(info): executable = ['/usr/sbin/debootstrap'] options = ['--arch=' + info.manifest.system['architecture']] diff --git a/common/tasks/filesystem.py b/common/tasks/filesystem.py index 4f79af5..b8f87c7 100644 --- a/common/tasks/filesystem.py +++ b/common/tasks/filesystem.py @@ -2,10 +2,22 @@ from base import Task from common import phases from common.tools import log_check_call from bootstrap import Bootstrap -from common.tasks import apt +import apt +import host import volume +class AddRequiredCommands(Task): + description = 'Adding commands required for formatting the partitions' + phase = phases.preparation + successors = [host.CheckExternalCommands] + + @classmethod + def run(cls, info): + if 'xfs' in (p.filesystem for p in info.volume.partition_map.partitions): + info.host_dependencies['mkfs.xfs'] = 'xfsprogs' + + class Format(Task): description = 'Formatting the volume' phase = phases.volume_preparation diff --git a/common/tasks/host.py b/common/tasks/host.py index fbedaf3..0522a49 100644 --- a/common/tasks/host.py +++ b/common/tasks/host.py @@ -3,35 +3,9 @@ from common import phases from common.exceptions import TaskError -class AddExternalCommands(Task): - description = 'Determining which external commands are required' - phase = phases.preparation - - @classmethod - def run(cls, info): - info.host_dependencies['debootstrap'] = 'debootstrap' - - from 
common.fs.loopbackvolume import LoopbackVolume - if isinstance(info.volume, LoopbackVolume): - info.host_dependencies['qemu-img'] = 'qemu-utils' - info.host_dependencies['losetup'] = 'mount' - from common.fs.qemuvolume import QEMUVolume - if isinstance(info.volume, QEMUVolume): - info.host_dependencies['losetup'] = 'qemu-nbd' - - if 'xfs' in (p.filesystem for p in info.volume.partition_map.partitions): - info.host_dependencies['mkfs.xfs'] = 'xfsprogs' - - from base.fs.partitionmaps.none import NoPartitions - if not isinstance(info.volume.partition_map, NoPartitions): - info.host_dependencies['parted'] = 'parted' - info.host_dependencies['kpartx'] = 'kpartx' - - class CheckExternalCommands(Task): description = 'Checking availability of external commands' phase = phases.preparation - predecessors = [AddExternalCommands] @classmethod def run(cls, info): diff --git a/common/tasks/loopback.py b/common/tasks/loopback.py index 9420429..615de0d 100644 --- a/common/tasks/loopback.py +++ b/common/tasks/loopback.py @@ -1,8 +1,25 @@ from base import Task from common import phases +import host import volume +class AddRequiredCommands(Task): + description = 'Adding commands required for creating loopback volumes' + phase = phases.preparation + successors = [host.CheckExternalCommands] + + @classmethod + def run(cls, info): + from common.fs.loopbackvolume import LoopbackVolume + if isinstance(info.volume, LoopbackVolume): + info.host_dependencies['qemu-img'] = 'qemu-utils' + info.host_dependencies['losetup'] = 'mount' + from common.fs.qemuvolume import QEMUVolume + if isinstance(info.volume, QEMUVolume): + info.host_dependencies['losetup'] = 'qemu-nbd' + + class Create(Task): description = 'Creating a loopback volume' phase = phases.volume_creation diff --git a/common/tasks/partitioning.py b/common/tasks/partitioning.py index a9d982c..88a8ebc 100644 --- a/common/tasks/partitioning.py +++ b/common/tasks/partitioning.py @@ -1,9 +1,23 @@ from base import Task from common import 
phases import filesystem +import host import volume +class AddRequiredCommands(Task): + description = 'Adding commands required for partitioning the volume' + phase = phases.preparation + successors = [host.CheckExternalCommands] + + @classmethod + def run(cls, info): + from base.fs.partitionmaps.none import NoPartitions + if not isinstance(info.volume.partition_map, NoPartitions): + info.host_dependencies['parted'] = 'parted' + info.host_dependencies['kpartx'] = 'kpartx' + + class PartitionVolume(Task): description = 'Partitioning the volume' phase = phases.volume_preparation diff --git a/providers/ec2/__init__.py b/providers/ec2/__init__.py index eee1866..243dbd8 100644 --- a/providers/ec2/__init__.py +++ b/providers/ec2/__init__.py @@ -97,7 +97,8 @@ def resolve_tasks(taskset, manifest): tasks.ebs.Attach, filesystem.FStab, tasks.ebs.Snapshot], - 's3': [loopback.Create, + 's3': [loopback.AddRequiredCommands, + loopback.Create, volume.Attach, tasks.filesystem.S3FStab, tasks.ami.BundleImage, From 851389da092bce8463f2d3b1b5d910915edf3ed2 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Sun, 23 Feb 2014 22:16:10 +0100 Subject: [PATCH 13/22] Rely on $PATH to resolve commands. 
Fixes #12 --- base/fs/partitionmaps/abstract.py | 8 ++--- base/fs/partitionmaps/gpt.py | 2 +- base/fs/partitionmaps/msdos.py | 2 +- base/fs/partitions/abstract.py | 12 +++---- base/fs/partitions/base.py | 4 +-- base/fs/partitions/gpt.py | 2 +- base/fs/partitions/gpt_swap.py | 2 +- base/fs/partitions/msdos_swap.py | 2 +- base/fs/volume.py | 4 +-- common/fs/loopbackvolume.py | 6 ++-- common/fs/qemuvolume.py | 6 ++-- common/tasks/apt.py | 32 +++++++++---------- common/tasks/boot.py | 18 +++++------ common/tasks/bootstrap.py | 2 +- common/tasks/cleanup.py | 2 +- common/tasks/filesystem.py | 2 +- common/tasks/initd.py | 8 ++--- common/tasks/locale.py | 6 ++-- common/tasks/packages.py | 12 +++---- common/tasks/security.py | 2 +- plugins/admin_user/tasks.py | 8 ++--- plugins/cloud_init/tasks.py | 2 +- plugins/opennebula/tasks.py | 2 +- plugins/puppet/tasks.py | 4 +-- plugins/root_password/tasks.py | 2 +- plugins/vagrant/tasks.py | 10 +++--- providers/ec2/tasks/ami.py | 4 +-- providers/ec2/tasks/boot.py | 6 ++-- providers/ec2/tasks/network.py | 18 +++++------ providers/virtualbox/tasks/guest_additions.py | 6 ++-- 30 files changed, 98 insertions(+), 98 deletions(-) diff --git a/base/fs/partitionmaps/abstract.py b/base/fs/partitionmaps/abstract.py index 831d048..45f7814 100644 --- a/base/fs/partitionmaps/abstract.py +++ b/base/fs/partitionmaps/abstract.py @@ -37,13 +37,13 @@ class AbstractPartitionMap(FSMProxy): def _before_map(self, event): volume = event.volume try: - mappings = log_check_call(['/sbin/kpartx', '-l', volume.device_path]) + mappings = log_check_call(['kpartx', '-l', volume.device_path]) import re regexp = re.compile('^(?P.+[^\d](?P\d+)) : ' '(?P\d) (?P\d+) ' '{device_path} (?P\d+)$' .format(device_path=volume.device_path)) - log_check_call(['/sbin/kpartx', '-a', volume.device_path]) + log_check_call(['kpartx', '-a', volume.device_path]) import os.path for mapping in mappings: match = regexp.match(mapping) @@ -61,7 +61,7 @@ class 
AbstractPartitionMap(FSMProxy): for partition in self.partitions: if not partition.fsm.can('unmap'): partition.unmap() - log_check_call(['/sbin/kpartx', '-d', volume.device_path]) + log_check_call(['kpartx', '-d', volume.device_path]) raise e def unmap(self, volume): @@ -73,6 +73,6 @@ class AbstractPartitionMap(FSMProxy): if partition.fsm.cannot('unmap'): msg = 'The partition {partition} prevents the unmap procedure'.format(partition=partition) raise PartitionError(msg) - log_check_call(['/sbin/kpartx', '-d', volume.device_path]) + log_check_call(['kpartx', '-d', volume.device_path]) for partition in self.partitions: partition.unmap() diff --git a/base/fs/partitionmaps/gpt.py b/base/fs/partitionmaps/gpt.py index 12f3383..50ed2ab 100644 --- a/base/fs/partitionmaps/gpt.py +++ b/base/fs/partitionmaps/gpt.py @@ -46,7 +46,7 @@ class GPTPartitionMap(AbstractPartitionMap): def _before_create(self, event): volume = event.volume - log_check_call(['/sbin/parted', '--script', '--align', 'none', volume.device_path, + log_check_call(['parted', '--script', '--align', 'none', volume.device_path, '--', 'mklabel', 'gpt']) for partition in self.partitions: partition.create(volume) diff --git a/base/fs/partitionmaps/msdos.py b/base/fs/partitionmaps/msdos.py index 988178a..1634842 100644 --- a/base/fs/partitionmaps/msdos.py +++ b/base/fs/partitionmaps/msdos.py @@ -36,7 +36,7 @@ class MSDOSPartitionMap(AbstractPartitionMap): def _before_create(self, event): volume = event.volume - log_check_call(['/sbin/parted', '--script', '--align', 'none', volume.device_path, + log_check_call(['parted', '--script', '--align', 'none', volume.device_path, '--', 'mklabel', 'msdos']) for partition in self.partitions: partition.create(volume) diff --git a/base/fs/partitions/abstract.py b/base/fs/partitions/abstract.py index f987bfb..6aae8c1 100644 --- a/base/fs/partitions/abstract.py +++ b/base/fs/partitions/abstract.py @@ -26,14 +26,14 @@ class AbstractPartition(FSMProxy): if isinstance(self.source, 
AbstractPartition): self.source.mount(destination=mount_dir) else: - log_check_call(['/bin/mount'] + self.opts + [self.source, mount_dir]) + log_check_call(['mount'] + self.opts + [self.source, mount_dir]) self.mount_dir = mount_dir def unmount(self): if isinstance(self.source, AbstractPartition): self.source.unmount() else: - log_check_call(['/bin/umount', self.mount_dir]) + log_check_call(['umount', self.mount_dir]) del self.mount_dir def __init__(self, size, filesystem, format_command): @@ -47,7 +47,7 @@ class AbstractPartition(FSMProxy): super(AbstractPartition, self).__init__(cfg) def get_uuid(self): - [uuid] = log_check_call(['/sbin/blkid', '-s', 'UUID', '-o', 'value', self.device_path]) + [uuid] = log_check_call(['blkid', '-s', 'UUID', '-o', 'value', self.device_path]) return uuid @abstractmethod @@ -59,7 +59,7 @@ class AbstractPartition(FSMProxy): def _before_format(self, e): if self.format_command is None: - format_command = ['/sbin/mkfs.{fs}', '{device_path}'] + format_command = ['mkfs.{fs}', '{device_path}'] else: format_command = self.format_command variables = {'fs': self.filesystem, @@ -70,7 +70,7 @@ class AbstractPartition(FSMProxy): log_check_call(command) def _before_mount(self, e): - log_check_call(['/bin/mount', '--types', self.filesystem, self.device_path, e.destination]) + log_check_call(['mount', '--types', self.filesystem, self.device_path, e.destination]) self.mount_dir = e.destination def _after_mount(self, e): @@ -80,7 +80,7 @@ class AbstractPartition(FSMProxy): def _before_unmount(self, e): for destination in sorted(self.mounts.iterkeys(), key=len, reverse=True): self.mounts[destination].unmount() - log_check_call(['/bin/umount', self.mount_dir]) + log_check_call(['umount', self.mount_dir]) del self.mount_dir def add_mount(self, source, destination, opts=[]): diff --git a/base/fs/partitions/base.py b/base/fs/partitions/base.py index 78b7e74..d8544f8 100644 --- a/base/fs/partitions/base.py +++ b/base/fs/partitions/base.py @@ -44,11 +44,11 
@@ class BasePartition(AbstractPartition): create_command = ('mkpart primary {start} {end}' .format(start=str(self.get_start()), end=str(self.get_end()))) - log_check_call(['/sbin/parted', '--script', '--align', 'none', e.volume.device_path, + log_check_call(['parted', '--script', '--align', 'none', e.volume.device_path, '--', create_command]) for flag in self.flags: - log_check_call(['/sbin/parted', '--script', e.volume.device_path, + log_check_call(['parted', '--script', e.volume.device_path, '--', ('set {idx} {flag} on' .format(idx=str(self.get_index()), flag=flag))]) diff --git a/base/fs/partitions/gpt.py b/base/fs/partitions/gpt.py index 52996ab..8dce6dd 100644 --- a/base/fs/partitions/gpt.py +++ b/base/fs/partitions/gpt.py @@ -14,5 +14,5 @@ class GPTPartition(BasePartition): name_command = ('name {idx} {name}' .format(idx=self.get_index(), name=self.name)) - log_check_call(['/sbin/parted', '--script', e.volume.device_path, + log_check_call(['parted', '--script', e.volume.device_path, '--', name_command]) diff --git a/base/fs/partitions/gpt_swap.py b/base/fs/partitions/gpt_swap.py index 4af9245..e5fdc3d 100644 --- a/base/fs/partitions/gpt_swap.py +++ b/base/fs/partitions/gpt_swap.py @@ -8,4 +8,4 @@ class GPTSwapPartition(GPTPartition): super(GPTSwapPartition, self).__init__(size, 'swap', None, 'swap', previous) def _before_format(self, e): - log_check_call(['/sbin/mkswap', self.device_path]) + log_check_call(['mkswap', self.device_path]) diff --git a/base/fs/partitions/msdos_swap.py b/base/fs/partitions/msdos_swap.py index f4e0339..18c30ff 100644 --- a/base/fs/partitions/msdos_swap.py +++ b/base/fs/partitions/msdos_swap.py @@ -8,4 +8,4 @@ class MSDOSSwapPartition(MSDOSPartition): super(MSDOSSwapPartition, self).__init__(size, 'swap', None, previous) def _before_format(self, e): - log_check_call(['/sbin/mkswap', self.device_path]) + log_check_call(['mkswap', self.device_path]) diff --git a/base/fs/volume.py b/base/fs/volume.py index 631c886..9f61541 100644 --- 
a/base/fs/volume.py +++ b/base/fs/volume.py @@ -76,12 +76,12 @@ class Volume(FSMProxy): if not hasattr(self, 'dm_node_name'): raise VolumeError('Unable to find a free block device path for mounting the bootstrap volume') - log_check_call(['/sbin/dmsetup', 'create', self.dm_node_name], table) + log_check_call(['dmsetup', 'create', self.dm_node_name], table) self.unlinked_device_path = self.device_path self.device_path = self.dm_node_path def _before_unlink_dm_node(self, e): - log_check_call(['/sbin/dmsetup', 'remove', self.dm_node_name]) + log_check_call(['dmsetup', 'remove', self.dm_node_name]) del self.dm_node_name del self.dm_node_path self.device_path = self.unlinked_device_path diff --git a/common/fs/loopbackvolume.py b/common/fs/loopbackvolume.py index 9f42728..87dfc1a 100644 --- a/common/fs/loopbackvolume.py +++ b/common/fs/loopbackvolume.py @@ -12,14 +12,14 @@ class LoopbackVolume(Volume): def _before_create(self, e): self.image_path = e.image_path vol_size = str(self.size.get_qty_in('MiB')) + 'M' - log_check_call(['/usr/bin/qemu-img', 'create', '-f', 'raw', self.image_path, vol_size]) + log_check_call(['qemu-img', 'create', '-f', 'raw', self.image_path, vol_size]) def _before_attach(self, e): - [self.loop_device_path] = log_check_call(['/sbin/losetup', '--show', '--find', self.image_path]) + [self.loop_device_path] = log_check_call(['losetup', '--show', '--find', self.image_path]) self.device_path = self.loop_device_path def _before_detach(self, e): - log_check_call(['/sbin/losetup', '--detach', self.loop_device_path]) + log_check_call(['losetup', '--detach', self.loop_device_path]) del self.loop_device_path del self.device_path diff --git a/common/fs/qemuvolume.py b/common/fs/qemuvolume.py index 3383a0c..82091f8 100644 --- a/common/fs/qemuvolume.py +++ b/common/fs/qemuvolume.py @@ -9,7 +9,7 @@ class QEMUVolume(LoopbackVolume): def _before_create(self, e): self.image_path = e.image_path vol_size = str(self.size.get_qty_in('MiB')) + 'M' - 
log_check_call(['/usr/bin/qemu-img', 'create', '-f', self.qemu_format, self.image_path, vol_size]) + log_check_call(['qemu-img', 'create', '-f', self.qemu_format, self.image_path, vol_size]) def _check_nbd_module(self): from base.fs.partitionmaps.none import NoPartitions @@ -40,11 +40,11 @@ class QEMUVolume(LoopbackVolume): def _before_attach(self, e): self._check_nbd_module() self.loop_device_path = self._find_free_nbd_device() - log_check_call(['/usr/bin/qemu-nbd', '--connect', self.loop_device_path, self.image_path]) + log_check_call(['qemu-nbd', '--connect', self.loop_device_path, self.image_path]) self.device_path = self.loop_device_path def _before_detach(self, e): - log_check_call(['/usr/bin/qemu-nbd', '--disconnect', self.loop_device_path]) + log_check_call(['qemu-nbd', '--disconnect', self.loop_device_path]) del self.loop_device_path del self.device_path diff --git a/common/tasks/apt.py b/common/tasks/apt.py index d39d0bf..a303600 100644 --- a/common/tasks/apt.py +++ b/common/tasks/apt.py @@ -87,8 +87,8 @@ class AptUpdate(Task): @classmethod def run(cls, info): - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/bin/apt-get', 'update']) + log_check_call(['chroot', info.root, + 'apt-get', 'update']) class AptUpgrade(Task): @@ -100,15 +100,15 @@ class AptUpgrade(Task): def run(cls, info): from subprocess import CalledProcessError try: - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/bin/apt-get', 'install', - '--fix-broken', - '--no-install-recommends', - '--assume-yes']) - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/bin/apt-get', 'upgrade', - '--no-install-recommends', - '--assume-yes']) + log_check_call(['chroot', info.root, + 'apt-get', 'install', + '--fix-broken', + '--no-install-recommends', + '--assume-yes']) + log_check_call(['chroot', info.root, + 'apt-get', 'upgrade', + '--no-install-recommends', + '--assume-yes']) except CalledProcessError as e: if e.returncode == 100: import logging @@ -125,9 +125,9 @@ class 
PurgeUnusedPackages(Task): @classmethod def run(cls, info): - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/bin/apt-get', 'autoremove', - '--purge']) + log_check_call(['chroot', info.root, + 'apt-get', 'autoremove', + '--purge']) class AptClean(Task): @@ -136,8 +136,8 @@ class AptClean(Task): @classmethod def run(cls, info): - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/bin/apt-get', 'clean']) + log_check_call(['chroot', info.root, + 'apt-get', 'clean']) lists = os.path.join(info.root, 'var/lib/apt/lists') for list_file in [os.path.join(lists, f) for f in os.listdir(lists)]: diff --git a/common/tasks/boot.py b/common/tasks/boot.py index d66d97a..271598f 100644 --- a/common/tasks/boot.py +++ b/common/tasks/boot.py @@ -91,9 +91,9 @@ class InstallGrub(Task): idx=idx + 1)) # Install grub - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/sbin/grub-install', device_path]) - log_check_call(['/usr/sbin/chroot', info.root, '/usr/sbin/update-grub']) + log_check_call(['chroot', info.root, + 'grub-install', device_path]) + log_check_call(['chroot', info.root, 'update-grub']) except Exception as e: if isinstance(info.volume, LoopbackVolume): remount(info.volume, unlink_fn) @@ -127,12 +127,12 @@ class InstallExtLinux(Task): bootloader = '/usr/lib/syslinux/gptmbr.bin' else: bootloader = '/usr/lib/extlinux/mbr.bin' - log_check_call(['/usr/sbin/chroot', info.root, - '/bin/dd', 'bs=440', 'count=1', + log_check_call(['chroot', info.root, + 'dd', 'bs=440', 'count=1', 'if=' + bootloader, 'of=' + info.volume.device_path]) - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/bin/extlinux', + log_check_call(['chroot', info.root, + 'extlinux', '--install', '/boot/extlinux']) - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/sbin/extlinux-update']) + log_check_call(['chroot', info.root, + 'extlinux-update']) diff --git a/common/tasks/bootstrap.py b/common/tasks/bootstrap.py index 42d89ba..5aa04c4 100644 --- a/common/tasks/bootstrap.py +++ 
b/common/tasks/bootstrap.py @@ -17,7 +17,7 @@ class AddRequiredCommands(Task): def get_bootstrap_args(info): - executable = ['/usr/sbin/debootstrap'] + executable = ['debootstrap'] options = ['--arch=' + info.manifest.system['architecture']] if len(info.include_packages) > 0: options.append('--include=' + ','.join(info.include_packages)) diff --git a/common/tasks/cleanup.py b/common/tasks/cleanup.py index c6cc3c1..cd01854 100644 --- a/common/tasks/cleanup.py +++ b/common/tasks/cleanup.py @@ -29,7 +29,7 @@ class ShredHostkeys(Task): public = [path + '.pub' for path in private] from common.tools import log_check_call - log_check_call(['/usr/bin/shred', '--remove'] + private + public) + log_check_call(['shred', '--remove'] + private + public) class CleanTMP(Task): diff --git a/common/tasks/filesystem.py b/common/tasks/filesystem.py index b8f87c7..769ed12 100644 --- a/common/tasks/filesystem.py +++ b/common/tasks/filesystem.py @@ -43,7 +43,7 @@ class TuneVolumeFS(Task): for partition in info.volume.partition_map.partitions: if not isinstance(partition, UnformattedPartition): if re.match('^ext[2-4]$', partition.filesystem) is not None: - log_check_call(['/sbin/tune2fs', '-i', '0', partition.device_path]) + log_check_call(['tune2fs', '-i', '0', partition.device_path]) class AddXFSProgs(Task): diff --git a/common/tasks/initd.py b/common/tasks/initd.py index 99842f6..23e2c14 100644 --- a/common/tasks/initd.py +++ b/common/tasks/initd.py @@ -21,10 +21,10 @@ class InstallInitScripts(Task): dst = os.path.join(info.root, 'etc/init.d', name) copy(src, dst) os.chmod(dst, rwxr_xr_x) - log_check_call(['/usr/sbin/chroot', info.root, '/sbin/insserv', '--default', name]) + log_check_call(['chroot', info.root, 'insserv', '--default', name]) for name in info.initd['disable']: - log_check_call(['/usr/sbin/chroot', info.root, '/sbin/insserv', '--remove', name]) + log_check_call(['chroot', info.root, 'insserv', '--remove', name]) class AddExpandRoot(Task): @@ -49,8 +49,8 @@ class 
AddSSHKeyGeneration(Task): install = info.initd['install'] from subprocess import CalledProcessError try: - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/bin/dpkg-query', '-W', 'openssh-server']) + log_check_call(['chroot', info.root, + 'dpkg-query', '-W', 'openssh-server']) if info.manifest.system['release'] == 'squeeze': install['generate-ssh-hostkeys'] = os.path.join(init_scripts_dir, 'squeeze/generate-ssh-hostkeys') else: diff --git a/common/tasks/locale.py b/common/tasks/locale.py index 7eb15e4..970e5af 100644 --- a/common/tasks/locale.py +++ b/common/tasks/locale.py @@ -28,12 +28,12 @@ class GenerateLocale(Task): search = '# ' + locale_str sed_i(locale_gen, search, locale_str) - log_check_call(['/usr/sbin/chroot', info.root, '/usr/sbin/locale-gen']) + log_check_call(['chroot', info.root, 'locale-gen']) lang = '{locale}.{charmap}'.format(locale=info.manifest.system['locale'], charmap=info.manifest.system['charmap']) - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/sbin/update-locale', 'LANG=' + lang]) + log_check_call(['chroot', info.root, + 'update-locale', 'LANG=' + lang]) class SetTimezone(Task): diff --git a/common/tasks/packages.py b/common/tasks/packages.py index bf171e7..d5bc921 100644 --- a/common/tasks/packages.py +++ b/common/tasks/packages.py @@ -45,10 +45,10 @@ class InstallPackages(Task): try: env = os.environ.copy() env['DEBIAN_FRONTEND'] = 'noninteractive' - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/bin/apt-get', 'install', - '--no-install-recommends', - '--assume-yes'] + log_check_call(['chroot', info.root, + 'apt-get', 'install', + '--no-install-recommends', + '--assume-yes'] + map(str, remote_packages), env=env) except CalledProcessError as e: @@ -90,8 +90,8 @@ class InstallPackages(Task): env = os.environ.copy() env['DEBIAN_FRONTEND'] = 'noninteractive' - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/bin/dpkg', '--install'] + log_check_call(['chroot', info.root, + 'dpkg', '--install'] + 
chrooted_package_paths, env=env) diff --git a/common/tasks/security.py b/common/tasks/security.py index ef109ee..e4339ba 100644 --- a/common/tasks/security.py +++ b/common/tasks/security.py @@ -10,7 +10,7 @@ class EnableShadowConfig(Task): @classmethod def run(cls, info): from common.tools import log_check_call - log_check_call(['/usr/sbin/chroot', info.root, '/sbin/shadowconfig', 'on']) + log_check_call(['chroot', info.root, 'shadowconfig', 'on']) class DisableSSHPasswordAuthentication(Task): diff --git a/plugins/admin_user/tasks.py b/plugins/admin_user/tasks.py index eb572e8..ab0131d 100644 --- a/plugins/admin_user/tasks.py +++ b/plugins/admin_user/tasks.py @@ -22,8 +22,8 @@ class CreateAdminUser(Task): @classmethod def run(cls, info): from common.tools import log_check_call - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/sbin/useradd', + log_check_call(['chroot', info.root, + 'useradd', '--create-home', '--shell', '/bin/bash', info.manifest.plugins['admin_user']['username']]) @@ -65,8 +65,8 @@ class DisableRootLogin(Task): from subprocess import CalledProcessError from common.tools import log_check_call try: - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/bin/dpkg-query', '-W', 'openssh-server']) + log_check_call(['chroot', info.root, + 'dpkg-query', '-W', 'openssh-server']) from common.tools import sed_i sshdconfig_path = os.path.join(info.root, 'etc/ssh/sshd_config') sed_i(sshdconfig_path, 'PermitRootLogin yes', 'PermitRootLogin no') diff --git a/plugins/cloud_init/tasks.py b/plugins/cloud_init/tasks.py index 75700bd..b4a6932 100644 --- a/plugins/cloud_init/tasks.py +++ b/plugins/cloud_init/tasks.py @@ -68,7 +68,7 @@ class SetMetadataSource(Task): logging.getLogger(__name__).warn(msg) return sources = "cloud-init cloud-init/datasources multiselect " + sources - log_check_call(['/usr/sbin/chroot', info.root, '/usr/bin/debconf-set-selections'], sources) + log_check_call(['chroot', info.root, 'debconf-set-selections'], sources) class 
DisableModules(Task): diff --git a/plugins/opennebula/tasks.py b/plugins/opennebula/tasks.py index ecb5bb3..c9ea35b 100644 --- a/plugins/opennebula/tasks.py +++ b/plugins/opennebula/tasks.py @@ -27,7 +27,7 @@ class OpenNebulaContext(Task): sed_i(vmcontext_def, '# Default-Start:', '# Default-Start: 2 3 4 5') from common.tools import log_check_call - log_check_call(['/usr/sbin/chroot', info.root, 'update-rc.d', 'vmcontext', 'start', + log_check_call(['chroot', info.root, 'update-rc.d', 'vmcontext', 'start', '90', '2', '3', '4', '5', 'stop', '90', '0', '6']) from shutil import copy diff --git a/plugins/puppet/tasks.py b/plugins/puppet/tasks.py index 8b0559c..5545bf1 100644 --- a/plugins/puppet/tasks.py +++ b/plugins/puppet/tasks.py @@ -84,8 +84,8 @@ class ApplyPuppetManifest(Task): manifest_path = os.path.join('/', manifest_rel_dst) from common.tools import log_check_call - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/bin/puppet', 'apply', manifest_path]) + log_check_call(['chroot', info.root, + 'puppet', 'apply', manifest_path]) os.remove(manifest_dst) from common.tools import sed_i diff --git a/plugins/root_password/tasks.py b/plugins/root_password/tasks.py index 85776cc..53ad7fe 100644 --- a/plugins/root_password/tasks.py +++ b/plugins/root_password/tasks.py @@ -9,5 +9,5 @@ class SetRootPassword(Task): @classmethod def run(cls, info): from common.tools import log_check_call - log_check_call(['/usr/sbin/chroot', info.root, '/usr/sbin/chpasswd'], + log_check_call(['chroot', info.root, 'chpasswd'], 'root:' + info.manifest.plugins['root_password']['password']) diff --git a/plugins/vagrant/tasks.py b/plugins/vagrant/tasks.py index 4118e08..2d7e178 100644 --- a/plugins/vagrant/tasks.py +++ b/plugins/vagrant/tasks.py @@ -71,8 +71,8 @@ class CreateVagrantUser(Task): @classmethod def run(cls, info): from common.tools import log_check_call - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/sbin/useradd', + log_check_call(['chroot', info.root, + 'useradd', 
'--create-home', '--shell', '/bin/bash', 'vagrant']) @@ -115,8 +115,8 @@ class AddInsecurePublicKey(Task): # We can't do this directly with python, since getpwnam gets its info from the host from common.tools import log_check_call - log_check_call(['/usr/sbin/chroot', info.root, - '/bin/chown', 'vagrant:vagrant', + log_check_call(['chroot', info.root, + 'chown', 'vagrant:vagrant', '/home/vagrant/.ssh', '/home/vagrant/.ssh/authorized_keys']) @@ -127,7 +127,7 @@ class SetRootPassword(Task): @classmethod def run(cls, info): from common.tools import log_check_call - log_check_call(['/usr/sbin/chroot', info.root, '/usr/sbin/chpasswd'], 'root:vagrant') + log_check_call(['chroot', info.root, 'chpasswd'], 'root:vagrant') class PackageBox(Task): diff --git a/providers/ec2/tasks/ami.py b/providers/ec2/tasks/ami.py index 28de349..456d269 100644 --- a/providers/ec2/tasks/ami.py +++ b/providers/ec2/tasks/ami.py @@ -39,7 +39,7 @@ class BundleImage(Task): bundle_name = 'bundle-{id}'.format(id=info.run_id) info.bundle_path = os.path.join(info.workspace, bundle_name) arch = {'i386': 'i386', 'amd64': 'x86_64'}.get(info.manifest.system['architecture']) - log_check_call(['/usr/bin/euca-bundle-image', + log_check_call(['euca-bundle-image', '--image', info.volume.image_path, '--arch', arch, '--user', info.credentials['user-id'], @@ -65,7 +65,7 @@ class UploadImage(Task): else: s3_url = 'https://s3-{region}.amazonaws.com/'.format(region=info.host['region']) info.manifest.manifest_location = info.manifest.image['bucket'] + '/' + info.ami_name + '.manifest.xml' - log_check_call(['/usr/bin/euca-upload-bundle', + log_check_call(['euca-upload-bundle', '--bucket', info.manifest.image['bucket'], '--manifest', manifest_file, '--access-key', info.credentials['access-key'], diff --git a/providers/ec2/tasks/boot.py b/providers/ec2/tasks/boot.py index 399cbac..7c7c1e3 100644 --- a/providers/ec2/tasks/boot.py +++ b/providers/ec2/tasks/boot.py @@ -45,6 +45,6 @@ class ConfigurePVGrub(Task): 
'GRUB_HIDDEN_TIMEOUT=true') from common.tools import log_check_call - log_check_call(['/usr/sbin/chroot', info.root, '/usr/sbin/update-grub']) - log_check_call(['/usr/sbin/chroot', info.root, - '/bin/ln', '--symbolic', '/boot/grub/grub.cfg', '/boot/grub/menu.lst']) + log_check_call(['chroot', info.root, 'update-grub']) + log_check_call(['chroot', info.root, + 'ln', '--symbolic', '/boot/grub/grub.cfg', '/boot/grub/menu.lst']) diff --git a/providers/ec2/tasks/network.py b/providers/ec2/tasks/network.py index cbaf4ac..6c13a32 100644 --- a/providers/ec2/tasks/network.py +++ b/providers/ec2/tasks/network.py @@ -40,17 +40,17 @@ class InstallEnhancedNetworking(Task): urllib.urlretrieve(drivers_url, archive) from common.tools import log_check_call - log_check_call('/bin/tar', '--ungzip', - '--extract', - '--file', archive, - '--directory', os.path.join(info.root, 'tmp')) + log_check_call('tar', '--ungzip', + '--extract', + '--file', archive, + '--directory', os.path.join(info.root, 'tmp')) src_dir = os.path.join('/tmp', os.path.basename(drivers_url), 'src') - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/bin/make', '--directory', src_dir]) - log_check_call(['/usr/sbin/chroot', info.root, - '/usr/bin/make', 'install', - '--directory', src_dir]) + log_check_call(['chroot', info.root, + 'make', '--directory', src_dir]) + log_check_call(['chroot', info.root, + 'make', 'install', + '--directory', src_dir]) ixgbevf_conf_path = os.path.join(info.root, 'etc/modprobe.d/ixgbevf.conf') with open(ixgbevf_conf_path, 'w') as ixgbevf_conf: diff --git a/providers/virtualbox/tasks/guest_additions.py b/providers/virtualbox/tasks/guest_additions.py index 37dd537..1277f42 100644 --- a/providers/virtualbox/tasks/guest_additions.py +++ b/providers/virtualbox/tasks/guest_additions.py @@ -29,8 +29,8 @@ class AddGuestAdditionsPackages(Task): info.packages.add('dkms') from common.tools import log_check_call - [kernel_version] = log_check_call(['/usr/sbin/chroot', info.root, - '/bin/uname', 
'-r']) + [kernel_version] = log_check_call(['chroot', info.root, + 'uname', '-r']) kernel_headers_pkg = 'linux-headers-{version}'.format(version=kernel_version) info.packages.add(kernel_headers_pkg) @@ -52,7 +52,7 @@ class InstallGuestAdditions(Task): install_script = os.path.join('/', mount_dir, 'VBoxLinuxAdditions.run') from common.tools import log_call - status, out, err = log_call(['/usr/sbin/chroot', info.root, + status, out, err = log_call(['chroot', info.root, install_script, '--nox11']) # Install will exit with $?=1 because X11 isn't installed if status != 1: From 210d3261c0c8399cbcba8d5d3d81b6ed659232a0 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Sun, 23 Feb 2014 22:22:29 +0100 Subject: [PATCH 14/22] Check if current user is root. Fixes #20 --- base/main.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/base/main.py b/base/main.py index 007157a..8d28c44 100644 --- a/base/main.py +++ b/base/main.py @@ -3,8 +3,12 @@ log = logging.getLogger(__name__) def main(): - import log + import os args = get_args() + # Require root privileges, except when doing a dry-run where they aren't needed + if os.geteuid() != 0 and not args.dry_run: + raise Exception('This program requires root privileges.') + import log logfile = log.get_logfile_path(args.manifest) log.setup_logger(logfile=logfile, debug=args.debug) run(args) @@ -40,7 +44,7 @@ def run(args): except (Exception, KeyboardInterrupt) as e: log.exception(e) if args.pause_on_error: - raw_input("Press Enter to commence rollback") + raw_input('Press Enter to commence rollback') log.error('Rolling back') rollback_tasklist = TaskList() From cdd372ca3b9d276cd7c00a3a093724a3e1349108 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Sun, 23 Feb 2014 22:54:25 +0100 Subject: [PATCH 15/22] Zerofree can be installed as a Debian package Show alternate message when package string for host_dependencies starts with http:// or https:// --- common/tasks/host.py | 12 ++++-- 
plugins/minimize_size/__init__.py | 9 ++--- .../manifest-schema-zerofree.json | 18 --------- plugins/minimize_size/manifest-schema.json | 8 +--- plugins/minimize_size/tasks.py | 37 +++++-------------- 5 files changed, 23 insertions(+), 61 deletions(-) delete mode 100644 plugins/minimize_size/manifest-schema-zerofree.json diff --git a/common/tasks/host.py b/common/tasks/host.py index 0522a49..5544b04 100644 --- a/common/tasks/host.py +++ b/common/tasks/host.py @@ -11,14 +11,20 @@ class CheckExternalCommands(Task): def run(cls, info): from common.tools import log_check_call from subprocess import CalledProcessError + import re missing_packages = [] for command, package in info.host_dependencies.items(): try: log_check_call(['type ' + command], shell=True) except CalledProcessError: - msg = ('The command `{command}\' is not available, ' - 'it is located in the package `{package}\'.' - .format(command=command, package=package)) + if re.match('^https?:\/\/', package): + msg = ('The command `{command}\' is not available, ' + 'you can download the software at `{package}\'.' + .format(command=command, package=package)) + else: + msg = ('The command `{command}\' is not available, ' + 'it is located in the package `{package}\'.' 
+ .format(command=command, package=package)) missing_packages.append(msg) if len(missing_packages) > 0: msg = '\n'.join(missing_packages) diff --git a/plugins/minimize_size/__init__.py b/plugins/minimize_size/__init__.py index d49b551..f5ab69c 100644 --- a/plugins/minimize_size/__init__.py +++ b/plugins/minimize_size/__init__.py @@ -5,9 +5,6 @@ def validate_manifest(data, validator, error): import os.path schema_path = os.path.join(os.path.dirname(__file__), 'manifest-schema.json') validator(data, schema_path) - if 'zerofree' in data['plugins']['minimize_size']: - zerofree_schema_path = os.path.join(os.path.dirname(__file__), 'manifest-schema-zerofree.json') - validator(data, zerofree_schema_path) if data['plugins']['minimize_size'].get('shrink', False) and data['volume']['backing'] != 'vmdk': error('Can only shrink vmdk images', ['plugins', 'minimize_size', 'shrink']) @@ -16,11 +13,11 @@ def resolve_tasks(taskset, manifest): taskset.update([tasks.AddFolderMounts, tasks.RemoveFolderMounts, ]) - if 'zerofree' in manifest.plugins['minimize_size']: - taskset.add(tasks.CheckZerofreePath) + if manifest.plugins['minimize_size'].get('zerofree', False): + taskset.add(tasks.AddRequiredCommands) taskset.add(tasks.Zerofree) if manifest.plugins['minimize_size'].get('shrink', False): - taskset.add(tasks.CheckVMWareDMCommand) + taskset.add(tasks.AddRequiredCommands) taskset.add(tasks.ShrinkVolume) diff --git a/plugins/minimize_size/manifest-schema-zerofree.json b/plugins/minimize_size/manifest-schema-zerofree.json deleted file mode 100644 index bd6715d..0000000 --- a/plugins/minimize_size/manifest-schema-zerofree.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "Minimize size plugin manifest", - "type": "object", - "properties": { - "volume": { - "type": "object", - "properties": { - "partitions": { - "type": "object", - "properties": { - "type": { "enum": ["none"] } - } - } - } - } - } -} diff --git 
a/plugins/minimize_size/manifest-schema.json b/plugins/minimize_size/manifest-schema.json index 0d7942c..0181fa2 100644 --- a/plugins/minimize_size/manifest-schema.json +++ b/plugins/minimize_size/manifest-schema.json @@ -10,16 +10,10 @@ "type": "object", "properties": { "shrink": { "type": "boolean" }, - "zerofree": { "$ref": "#/definitions/absolute_path" } + "zerofree": { "type": "boolean" } } } } } - }, - "definitions": { - "absolute_path": { - "type": "string", - "pattern": "^/[^\\0]+$" - } } } diff --git a/plugins/minimize_size/tasks.py b/plugins/minimize_size/tasks.py index 37798ae..b04f301 100644 --- a/plugins/minimize_size/tasks.py +++ b/plugins/minimize_size/tasks.py @@ -3,6 +3,7 @@ from common import phases from common.tasks import apt from common.tasks import bootstrap from common.tasks import filesystem +from common.tasks import host from common.tasks import partitioning from common.tasks import volume import os @@ -46,22 +47,20 @@ class RemoveFolderMounts(Task): del info.minimize_size_folder -class CheckZerofreePath(Task): - description = 'Checking path to zerofree tool' +class AddRequiredCommands(Task): + description = 'Adding commands required for reducing volume size' phase = phases.preparation + successors = [host.CheckExternalCommands] @classmethod def run(cls, info): - from common.exceptions import TaskError - import os - zerofree = info.manifest.plugins['minimize_size']['zerofree'] - if not os.path.isfile(zerofree): - raise TaskError('The path `{path}\' does not exist or is not a file'.format(path=zerofree)) - if not os.access(zerofree, os.X_OK): - raise TaskError('The path `{path}\' is not executable'.format(path=zerofree)) + if info.manifest.plugins['minimize_size'].get('zerofree', False): + info.host_dependencies['zerofree'] = 'zerofree' + if info.manifest.plugins['minimize_size'].get('shrink', False): + link = 'https://my.vmware.com/web/vmware/info/slug/desktop_end_user_computing/vmware_workstation/10_0' + 
info.host_dependencies['vmware-vdiskmanager'] = link -# Get zerofree here: http://intgat.tigress.co.uk/rmy/uml/index.html class Zerofree(Task): description = 'Zeroing unused blocks on the volume' phase = phases.volume_unmounting @@ -71,23 +70,7 @@ class Zerofree(Task): @classmethod def run(cls, info): from common.tools import log_check_call - zerofree = info.manifest.plugins['minimize_size']['zerofree'] - log_check_call([zerofree, info.volume.device_path]) - - -class CheckVMWareDMCommand(Task): - description = 'Checking path to vmware-vdiskmanager tool' - phase = phases.preparation - - @classmethod - def run(cls, info): - from common.exceptions import TaskError - import os - vdiskmngr = '/usr/bin/vmware-vdiskmanager' - if not os.path.isfile(vdiskmngr): - raise TaskError('Unable to find vmware-vdiskmanager at `{path}\''.format(path=vdiskmngr)) - if not os.access(vdiskmngr, os.X_OK): - raise TaskError('vmware-vdiskmanager at `{path}\' is not executable'.format(path=vdiskmngr)) + log_check_call(['zerofree', info.volume.device_path]) class ShrinkVolume(Task): From c4794ce9f3007ddf5420e421fea71c99863e73b3 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Thu, 6 Mar 2014 20:37:07 +0100 Subject: [PATCH 16/22] Remove error when adding existing package with same target --- base/pkg/packagelist.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/base/pkg/packagelist.py b/base/pkg/packagelist.py index 762bb4a..06231d4 100644 --- a/base/pkg/packagelist.py +++ b/base/pkg/packagelist.py @@ -34,12 +34,13 @@ class PackageList(object): target = target.format(**self.manifest_vars) package = next((pkg for pkg in self.remote() if pkg.name == name), None) if package is not None: - same_target = package.target != target + same_target = package.target == target same_target = same_target or package.target is None and target == self.default_target same_target = same_target or package.target == self.default_target and target is None if not same_target: msg = ('The 
package {name} was already added to the package list, ' - 'but with another target release ({target})').format(name=name, target=package.target) + 'but with target release `{target}\' instead of `{add_target}\'' + .format(name=name, target=package.target, add_target=target)) raise PackageError(msg) return From b692532d90731e46e557e9cf3b1831ce97915e21 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Thu, 6 Mar 2014 20:41:11 +0100 Subject: [PATCH 17/22] Split up CheckPaths task in puppet plugin --- plugins/puppet/__init__.py | 3 ++- plugins/puppet/tasks.py | 12 ++++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/plugins/puppet/__init__.py b/plugins/puppet/__init__.py index 089a0ca..458b1cd 100644 --- a/plugins/puppet/__init__.py +++ b/plugins/puppet/__init__.py @@ -8,9 +8,10 @@ def validate_manifest(data, validator, error): def resolve_tasks(taskset, manifest): - taskset.add(tasks.CheckPaths) taskset.add(tasks.AddPackages) if 'assets' in manifest.plugins['puppet']: + taskset.add(tasks.CheckAssetsPath) taskset.add(tasks.CopyPuppetAssets) if 'manifest' in manifest.plugins['puppet']: + taskset.add(tasks.CheckManifestPath) taskset.add(tasks.ApplyPuppetManifest) diff --git a/plugins/puppet/tasks.py b/plugins/puppet/tasks.py index 5545bf1..369979e 100644 --- a/plugins/puppet/tasks.py +++ b/plugins/puppet/tasks.py @@ -5,8 +5,8 @@ from common.tasks import network import os -class CheckPaths(Task): - description = 'Checking whether manifest and assets paths exist' +class CheckAssetsPath(Task): + description = 'Checking whether the assets path exist' phase = phases.preparation @classmethod @@ -20,6 +20,14 @@ class CheckPaths(Task): msg = 'The assets path {assets} does not point to a directory.'.format(assets=assets) raise TaskError(msg) + +class CheckManifestPath(Task): + description = 'Checking whether the manifest path exist' + phase = phases.preparation + + @classmethod + def run(cls, info): + from common.exceptions import TaskError manifest = 
info.manifest.plugins['puppet']['manifest'] if not os.path.exists(manifest): msg = 'The manifest file {manifest} does not exist.'.format(manifest=manifest) From 811320f664e05426787a498c3a525aaec3a59cb0 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Fri, 7 Mar 2014 08:10:53 +0100 Subject: [PATCH 18/22] Fix syntax errors in pkg-kernels json Fixes #28 --- providers/ec2/tasks/packages-kernels.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/providers/ec2/tasks/packages-kernels.json b/providers/ec2/tasks/packages-kernels.json index 6d33632..1e42622 100644 --- a/providers/ec2/tasks/packages-kernels.json +++ b/providers/ec2/tasks/packages-kernels.json @@ -6,7 +6,7 @@ "wheezy": {"i386": "linux-image-686", "amd64": "linux-image-amd64"}, -"jessie": { +"jessie": {"i386": "linux-image-686", - "amd64": "linux-image-amd64"}, + "amd64": "linux-image-amd64"} } From a06b4463d3fea9fe99dac5ddab9feb27e045ffc3 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Sat, 22 Mar 2014 15:13:22 +0100 Subject: [PATCH 19/22] Fix links in README --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 7d0d12d..ea63c6a 100644 --- a/README.md +++ b/README.md @@ -2,13 +2,13 @@ bootstrap-vz =========================================== bootstrap-vz is a bootstrapping framework for Debian. -It is is specifically intended to bootstrap systems for virtualized environments. -It runs without any user intervention and generates ready-to-boot images for -[a number of virtualization platforms(http://andsens.github.io/bootstrap-vz/providers.html). -Its aim is to provide a reproducable bootstrapping process using manifests -as well as supporting a high degree of customizability through plugins.
-bootstrap-vz was coded from scratch in python once the bash scripts that were used in the -[build-debian-cloud](https://github.com/andsens/build-debian-cloud) bootstrapper reached their +It is is specifically targeted at bootstrapping systems for virtualized environments. +bootstrap-vz runs without any user intervention and generates ready-to-boot images for +[a number of virtualization platforms](http://andsens.github.io/bootstrap-vz/providers.html). +Its aim is to provide a reproducable bootstrapping process using [manifests](http://andsens.github.io/bootstrap-vz/manifest.html) as well as supporting a high degree of customizability through plugins. + +bootstrap-vz was coded from scratch in python once the bash script architecture that was used in the +[build-debian-cloud](https://github.com/andsens/build-debian-cloud) bootstrapper reached its limits. Documentation @@ -16,5 +16,5 @@ Documentation The documentation for bootstrap-vz is available at [andsens.github.io/bootstrap-vz](http://andsens.github.io/bootstrap-vz). There, you can discover [what the dependencies](http://andsens.github.io/bootstrap-vz/#dependencies) -for a specific cloud provider are, [see the list of available plugins](http://andsens.github.io/bootstrap-vz/plugins.html) +for a specific cloud provider are, [see a list of available plugins](http://andsens.github.io/bootstrap-vz/plugins.html) and learn [how you create a manifest](http://andsens.github.io/bootstrap-vz/manifest.html). 
From ca13d66b167d7251e5174c7530ff556438e15ad7 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Sat, 22 Mar 2014 15:13:43 +0100 Subject: [PATCH 20/22] losetup dependency was suggesting wrong package --- common/tasks/loopback.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/tasks/loopback.py b/common/tasks/loopback.py index 615de0d..8700feb 100644 --- a/common/tasks/loopback.py +++ b/common/tasks/loopback.py @@ -17,7 +17,7 @@ class AddRequiredCommands(Task): info.host_dependencies['losetup'] = 'mount' from common.fs.qemuvolume import QEMUVolume if isinstance(info.volume, QEMUVolume): - info.host_dependencies['losetup'] = 'qemu-nbd' + info.host_dependencies['losetup'] = 'mount' class Create(Task): From da4b85c0c7bb78fb53fedb0224194aaebd0c7525 Mon Sep 17 00:00:00 2001 From: Anders Ingemann Date: Sun, 23 Mar 2014 16:04:03 +0100 Subject: [PATCH 21/22] Everything in base/ is now commented. --- base/__init__.py | 7 +++ base/bootstrapinfo.py | 41 ++++++++++++++ base/fs/__init__.py | 12 +++++ base/fs/exceptions.py | 4 ++ base/fs/partitionmaps/abstract.py | 50 +++++++++++++++++ base/fs/partitionmaps/gpt.py | 22 ++++++++ base/fs/partitionmaps/msdos.py | 17 ++++++ base/fs/partitionmaps/none.py | 20 +++++++ base/fs/partitions/abstract.py | 70 ++++++++++++++++++++++++ base/fs/partitions/base.py | 46 ++++++++++++++++ base/fs/partitions/gpt.py | 11 ++++ base/fs/partitions/gpt_swap.py | 7 +++ base/fs/partitions/msdos.py | 2 + base/fs/partitions/msdos_swap.py | 7 +++ base/fs/partitions/single.py | 8 +++ base/fs/partitions/unformatted.py | 9 ++++ base/fs/volume.py | 59 ++++++++++++++++++++ base/log.py | 33 ++++++++++++ base/main.py | 40 ++++++++++++++ base/manifest.py | 58 ++++++++++++++++++++ base/phase.py | 17 ++++++ base/pkg/exceptions.py | 4 ++ base/pkg/packagelist.py | 57 ++++++++++++++++++++ base/pkg/sourceslist.py | 42 +++++++++++++++ base/task.py | 20 +++++++ base/tasklist.py | 89 +++++++++++++++++++++++++++++-- plugins/puppet/tasks.py | 10 ++++ 
27 files changed, 757 insertions(+), 5 deletions(-) diff --git a/base/__init__.py b/base/__init__.py index 4e11028..a448245 100644 --- a/base/__init__.py +++ b/base/__init__.py @@ -5,6 +5,13 @@ from main import main def validate_manifest(data, validator, error): + """Validates the manifest using the base manifest + + Args: + data (dict): The data of the manifest + validator (function): The function that validates the manifest given the data and a path + error (function): The function tha raises an error when the validation fails + """ import os.path schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) validator(data, schema_path) diff --git a/base/bootstrapinfo.py b/base/bootstrapinfo.py index bcaf881..06d529b 100644 --- a/base/bootstrapinfo.py +++ b/base/bootstrapinfo.py @@ -1,26 +1,46 @@ class BootstrapInformation(object): + """The BootstrapInformation class holds all information about the bootstrapping process. + The nature of the attributes of this class are rather diverse. + Tasks may set their own attributes on this class for later retrieval by another task. + Information that becomes invalid (e.g. a path to a file that has been deleted) must be removed. + """ def __init__(self, manifest=None, debug=False): + """Instantiates a new bootstrap info object. + + Args: + manifest (Manifest): The manifest + debug (bool): Whether debugging is turned on + """ + # Set the manifest attribute. self.manifest = manifest self.debug = debug + # Create a run_id. 
This id may be used to uniquely identify the currrent bootstrapping process import random self.run_id = '{id:08x}'.format(id=random.randrange(16 ** 8)) + # Define the path to our workspace import os.path self.workspace = os.path.join(manifest.bootstrapper['workspace'], self.run_id) + # Load all the volume information from fs import load_volume self.volume = load_volume(self.manifest.volume, manifest.system['bootloader']) + # The default apt mirror self.apt_mirror = self.manifest.packages.get('mirror', 'http://http.debian.net/debian') + # Normalize the release codenames so that tasks may query for release codenames rather than + # 'stable', 'unstable' etc. This is useful when handling cases that are specific to a release. release_codenames_path = os.path.join(os.path.dirname(__file__), 'release-codenames.json') from common.tools import config_get self.release_codename = config_get(release_codenames_path, [self.manifest.system['release']]) class DictClass(dict): + """Tiny extension of dict to allow setting and getting keys via attributes + """ def __getattr__(self, name): return self[name] @@ -28,18 +48,29 @@ class BootstrapInformation(object): self[name] = value def set_manifest_vars(obj, data): + """Runs through the manifest and creates DictClasses for every key + + Args: + obj (dict): dictionary to set the values on + data (dict): dictionary of values to set on the obj + """ for key, value in data.iteritems(): if isinstance(value, dict): obj[key] = DictClass() set_manifest_vars(obj[key], value) continue + # Lists are not supported if not isinstance(value, list): obj[key] = value + # manifest_vars is a dictionary of all the manifest values, + # with it users can cross-reference values in the manifest, so that they do not need to be written twice self.manifest_vars = {} self.manifest_vars['apt_mirror'] = self.apt_mirror set_manifest_vars(self.manifest_vars, self.manifest.data) + # Populate the manifest_vars with datetime information + # and map the datetime 
variables directly to the dictionary from datetime import datetime now = datetime.now() time_vars = ['%a', '%A', '%b', '%B', '%c', '%d', '%f', '%H', @@ -48,13 +79,23 @@ class BootstrapInformation(object): for key in time_vars: self.manifest_vars[key] = now.strftime(key) + # Keep a list of apt sources, + # so that tasks may add to that list without having to fiddle with apt source list files. from pkg.sourceslist import SourceLists self.source_lists = SourceLists(self.manifest_vars) + # Keep a list of packages that should be installed, tasks can add and remove things from this list from pkg.packagelist import PackageList self.packages = PackageList(self.manifest_vars, self.source_lists) + + # These sets should rarely be used and specify which packages the debootstrap invocation + # should be called with. self.include_packages = set() self.exclude_packages = set() + # Dictionary to specify which commands are required on the host. + # The keys are commands, while the values are either package names or urls + # that hint at how a command may be made available. self.host_dependencies = {} + # Lists of startup scripts that should be installed and disabled self.initd = {'install': {}, 'disable': []} diff --git a/base/fs/__init__.py b/base/fs/__init__.py index f19e189..5773f47 100644 --- a/base/fs/__init__.py +++ b/base/fs/__init__.py @@ -1,10 +1,19 @@ def load_volume(data, bootloader): + """Instantiates a volume that corresponds to the data in the manifest + Args: + data (dict): The 'volume' section from the manifest + bootloader (str): Name of the bootloader the system will boot with + + Returns: + Volume. 
The volume that represents all information pertaining to the volume we bootstrap on + """ from common.fs.loopbackvolume import LoopbackVolume from providers.ec2.ebsvolume import EBSVolume from common.fs.virtualdiskimage import VirtualDiskImage from common.fs.virtualmachinedisk import VirtualMachineDisk + # Create a mapping between valid partition maps in the manifest and their corresponding classes from partitionmaps.gpt import GPTPartitionMap from partitionmaps.msdos import MSDOSPartitionMap from partitionmaps.none import NoPartitions @@ -12,11 +21,14 @@ def load_volume(data, bootloader): 'gpt': GPTPartitionMap, 'msdos': MSDOSPartitionMap, } + # Instantiate the partition map partition_map = partition_maps.get(data['partitions']['type'])(data['partitions'], bootloader) + # Create a mapping between valid volume backings in the manifest and their corresponding classes volume_backings = {'raw': LoopbackVolume, 's3': LoopbackVolume, 'vdi': VirtualDiskImage, 'vmdk': VirtualMachineDisk, 'ebs': EBSVolume } + # Create the volume with the partition map as an argument return volume_backings.get(data['backing'])(partition_map) diff --git a/base/fs/exceptions.py b/base/fs/exceptions.py index bc38490..fad7868 100644 --- a/base/fs/exceptions.py +++ b/base/fs/exceptions.py @@ -1,8 +1,12 @@ class VolumeError(Exception): + """Raised when an error occurs while interacting with the volume + """ pass class PartitionError(Exception): + """Raised when an error occurs while interacting with the partitions on the volume + """ pass diff --git a/base/fs/partitionmaps/abstract.py b/base/fs/partitionmaps/abstract.py index 45f7814..509b7b1 100644 --- a/base/fs/partitionmaps/abstract.py +++ b/base/fs/partitionmaps/abstract.py @@ -6,25 +6,50 @@ from ..exceptions import PartitionError class AbstractPartitionMap(FSMProxy): + """Abstract representation of a partiton map + This class is a finite state machine and represents the state of the real partition map + """ __metaclass__ = ABCMeta + # States 
the partition map can be in events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'}, {'name': 'map', 'src': 'unmapped', 'dst': 'mapped'}, {'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'}, ] def __init__(self, bootloader): + """ + Args: + bootloader (str): Name of the bootloader we will use for bootstrapping + """ + # Create the configuration for the state machine cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': {}} super(AbstractPartitionMap, self).__init__(cfg) def is_blocking(self): + """Returns whether the partition map is blocking volume detach operations + + Returns: + bool. + """ return self.fsm.current == 'mapped' def get_total_size(self): + """Returns the total size the partitions occupy + + Returns: + Bytes. The size of all the partitions + """ + # We just need the endpoint of the last partition return self.partitions[-1].get_end() def create(self, volume): + """Creates the partition map + + Args: + volume (Volume): The volume to create the partition map on + """ self.fsm.create(volume=volume) @abstractmethod @@ -32,11 +57,21 @@ class AbstractPartitionMap(FSMProxy): pass def map(self, volume): + """Maps the partition map to device nodes + + Args: + volume (Volume): The volume the partition map resides on + """ self.fsm.map(volume=volume) def _before_map(self, event): + """ + Raises: + PartitionError + """ volume = event.volume try: + # Ask kpartx how the partitions will be mapped before actually attaching them. 
mappings = log_check_call(['kpartx', '-l', volume.device_path]) import re regexp = re.compile('^(?P.+[^\d](?P\d+)) : ' @@ -45,6 +80,7 @@ class AbstractPartitionMap(FSMProxy): .format(device_path=volume.device_path)) log_check_call(['kpartx', '-a', volume.device_path]) import os.path + # Run through the kpartx output and map the paths to the partitions for mapping in mappings: match = regexp.match(mapping) if match is None: @@ -53,11 +89,13 @@ class AbstractPartitionMap(FSMProxy): p_idx = int(match.group('p_idx')) - 1 self.partitions[p_idx].map(partition_path) + # Check if any partition was not mapped for idx, partition in enumerate(self.partitions): if partition.fsm.current not in ['mapped', 'formatted']: raise PartitionError('kpartx did not map partition #{idx}'.format(idx=idx + 1)) except PartitionError as e: + # Revert any mapping and reraise the error for partition in self.partitions: if not partition.fsm.can('unmap'): partition.unmap() @@ -65,14 +103,26 @@ class AbstractPartitionMap(FSMProxy): raise e def unmap(self, volume): + """Unmaps the partition + + Args: + volume (Volume): The volume to unmap the partition map from + """ self.fsm.unmap(volume=volume) def _before_unmap(self, event): + """ + Raises: + PartitionError + """ volume = event.volume + # Run through all partitions before unmapping and make sure they can all be unmapped for partition in self.partitions: if partition.fsm.cannot('unmap'): msg = 'The partition {partition} prevents the unmap procedure'.format(partition=partition) raise PartitionError(msg) + # Actually unmap the partitions log_check_call(['kpartx', '-d', volume.device_path]) + # Call unmap on all partitions for partition in self.partitions: partition.unmap() diff --git a/base/fs/partitionmaps/gpt.py b/base/fs/partitionmaps/gpt.py index 50ed2ab..bd0222c 100644 --- a/base/fs/partitionmaps/gpt.py +++ b/base/fs/partitionmaps/gpt.py @@ -5,23 +5,38 @@ from common.tools import log_check_call class GPTPartitionMap(AbstractPartitionMap): + 
"""Represents a GPT partition map + """ def __init__(self, data, bootloader): + """ + Args: + data (dict): volume.partitions part of the manifest + bootloader (str): Name of the bootloader we will use for bootstrapping + """ from common.bytes import Bytes + # List of partitions self.partitions = [] + # Returns the last partition unless there is none def last_partition(): return self.partitions[-1] if len(self.partitions) > 0 else None + # GPT offset gpt_offset = Bytes('17KiB') + # If we are using the grub bootloader we need to create an unformatted partition + # at the beginning of the map. Its size is 1007kb, which we will steal from the + # next partition. if bootloader == 'grub': from ..partitions.unformatted import UnformattedPartition self.grub_boot = UnformattedPartition(Bytes('1007KiB'), last_partition()) self.grub_boot.offset = gpt_offset + # Mark the partition as a bios_grub partition self.grub_boot.flags.append('bios_grub') self.partitions.append(self.grub_boot) + # The boot and swap partitions are optional if 'boot' in data: self.boot = GPTPartition(Bytes(data['boot']['size']), data['boot']['filesystem'], data['boot'].get('format_command', None), @@ -35,6 +50,8 @@ class GPTPartitionMap(AbstractPartitionMap): 'root', last_partition()) self.partitions.append(self.root) + # Depending on whether we have a grub boot partition + # we will need to set the offset accordingly. 
if hasattr(self, 'grub_boot'): self.partitions[1].size -= gpt_offset self.partitions[1].size -= self.grub_boot.size @@ -45,8 +62,13 @@ class GPTPartitionMap(AbstractPartitionMap): super(GPTPartitionMap, self).__init__(bootloader) def _before_create(self, event): + """Creates the partition map + """ volume = event.volume + # Disk alignment still plays a role in virtualized environment, + # but I honestly have no clue as to what best practice is here, so we choose 'none' log_check_call(['parted', '--script', '--align', 'none', volume.device_path, '--', 'mklabel', 'gpt']) + # Create the partitions for partition in self.partitions: partition.create(volume) diff --git a/base/fs/partitionmaps/msdos.py b/base/fs/partitionmaps/msdos.py index 1634842..3d40f64 100644 --- a/base/fs/partitionmaps/msdos.py +++ b/base/fs/partitionmaps/msdos.py @@ -5,14 +5,25 @@ from common.tools import log_check_call class MSDOSPartitionMap(AbstractPartitionMap): + """Represents a MS-DOS partition map + Sometimes also called MBR (but that confuses the hell out of me, so ms-dos it is) + """ def __init__(self, data, bootloader): + """ + Args: + data (dict): volume.partitions part of the manifest + bootloader (str): Name of the bootloader we will use for bootstrapping + """ from common.bytes import Bytes + # List of partitions self.partitions = [] + # Returns the last partition unless there is none def last_partition(): return self.partitions[-1] if len(self.partitions) > 0 else None + # The boot and swap partitions are optional if 'boot' in data: self.boot = MSDOSPartition(Bytes(data['boot']['size']), data['boot']['filesystem'], data['boot'].get('format_command', None), @@ -26,8 +37,11 @@ class MSDOSPartitionMap(AbstractPartitionMap): last_partition()) self.partitions.append(self.root) + # Mark boot as the boot partition, or root, if boot does not exist getattr(self, 'boot', self.root).flags.append('boot') + # If we are using the grub bootloader, we will need to create a 2 MB offset at the 
beginning + # of the partitionmap and steal it from the first partition if bootloader == 'grub': self.partitions[0].offset = Bytes('2MiB') self.partitions[0].size -= self.partitions[0].offset @@ -36,7 +50,10 @@ class MSDOSPartitionMap(AbstractPartitionMap): def _before_create(self, event): volume = event.volume + # Disk alignment still plays a role in virtualized environment, + # but I honestly have no clue as to what best practice is here, so we choose 'none' log_check_call(['parted', '--script', '--align', 'none', volume.device_path, '--', 'mklabel', 'msdos']) + # Create the partitions for partition in self.partitions: partition.create(volume) diff --git a/base/fs/partitionmaps/none.py b/base/fs/partitionmaps/none.py index 550cbf5..4b7935d 100644 --- a/base/fs/partitionmaps/none.py +++ b/base/fs/partitionmaps/none.py @@ -2,15 +2,35 @@ from ..partitions.single import SinglePartition class NoPartitions(object): + """Represents a virtual 'NoPartitions' partitionmap. + This virtual partition map exists because it is easier for tasks to + simply always deal with partition maps and then let the base abstract that away. + """ def __init__(self, data, bootloader): + """ + Args: + data (dict): volume.partitions part of the manifest + bootloader (str): Name of the bootloader we will use for bootstrapping + """ from common.bytes import Bytes + # In the NoPartitions partitions map we only have a single 'partition' self.root = SinglePartition(Bytes(data['root']['size']), data['root']['filesystem'], data['root'].get('format_command', None)) self.partitions = [self.root] def is_blocking(self): + """Returns whether the partition map is blocking volume detach operations + + Returns: + bool. + """ return self.root.fsm.current == 'mounted' def get_total_size(self): + """Returns the total size the partitions occupy + + Returns: + Bytes. 
The size of all the partitions + """ return self.root.get_end() diff --git a/base/fs/partitions/abstract.py b/base/fs/partitions/abstract.py index 6aae8c1..1dd4fe2 100644 --- a/base/fs/partitions/abstract.py +++ b/base/fs/partitions/abstract.py @@ -6,9 +6,13 @@ from common.fsm_proxy import FSMProxy class AbstractPartition(FSMProxy): + """Abstract representation of a partiton + This class is a finite state machine and represents the state of the real partition + """ __metaclass__ = ABCMeta + # Our states events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'created'}, {'name': 'format', 'src': 'created', 'dst': 'formatted'}, {'name': 'mount', 'src': 'formatted', 'dst': 'mounted'}, @@ -16,13 +20,26 @@ class AbstractPartition(FSMProxy): ] class Mount(object): + """Represents a mount into the partition + """ def __init__(self, source, destination, opts): + """ + Args: + source (str,AbstractPartition): The path from where we mount or a partition + destination (str): The path of the mountpoint + opts (list): List of options to pass to the mount command + """ self.source = source self.destination = destination self.opts = opts def mount(self, prefix): + """Performs the mount operation or forwards it to another partition + Args: + prefix (str): Path prefix of the mountpoint + """ mount_dir = os.path.join(prefix, self.destination) + # If the source is another partition, we tell that partition to mount itself if isinstance(self.source, AbstractPartition): self.source.mount(destination=mount_dir) else: @@ -30,6 +47,9 @@ class AbstractPartition(FSMProxy): self.mount_dir = mount_dir def unmount(self): + """Performs the unmount operation or asks the partition to unmount itself + """ + # If its a partition, it can unmount itself if isinstance(self.source, AbstractPartition): self.source.unmount() else: @@ -37,16 +57,30 @@ class AbstractPartition(FSMProxy): del self.mount_dir def __init__(self, size, filesystem, format_command): + """ + Args: + size (Bytes): Size of the 
partition + filesystem (str): Filesystem the partition should be formatted with + format_command (list): Optional format command, valid variables are fs, device_path and size + """ self.size = size self.filesystem = filesystem self.format_command = format_command + # Path to the partition self.device_path = None + # Dictionary with mount points as keys and Mount objects as values self.mounts = {} + # Create the configuration for our state machine cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': {}} super(AbstractPartition, self).__init__(cfg) def get_uuid(self): + """Gets the UUID of the partition + + Returns: + str. The UUID of the partition + """ [uuid] = log_check_call(['blkid', '-s', 'UUID', '-o', 'value', self.device_path]) return uuid @@ -55,9 +89,17 @@ class AbstractPartition(FSMProxy): pass def get_end(self): + """Gets the end of the partition + + Returns: + Bytes. The end of the partition + """ return self.get_start() + self.size def _before_format(self, e): + """Formats the partition + """ + # If there is no explicit format_command define we simply call mkfs.fstype if self.format_command is None: format_command = ['mkfs.{fs}', '{device_path}'] else: @@ -67,29 +109,57 @@ class AbstractPartition(FSMProxy): 'size': self.size, } command = map(lambda part: part.format(**variables), format_command) + # Format the partition log_check_call(command) def _before_mount(self, e): + """Mount the partition + """ log_check_call(['mount', '--types', self.filesystem, self.device_path, e.destination]) self.mount_dir = e.destination def _after_mount(self, e): + """Mount any mounts associated with this partition + """ + # Make sure we mount in ascending order of mountpoint path length + # This ensures that we don't mount /dev/pts before we mount /dev for destination in sorted(self.mounts.iterkeys(), key=len): self.mounts[destination].mount(self.mount_dir) def _before_unmount(self, e): + """Unmount any mounts associated with this partition + """ + # 
Unmount the mounts in descending order of mounpoint path length + # You cannot unmount /dev before you have unmounted /dev/pts for destination in sorted(self.mounts.iterkeys(), key=len, reverse=True): self.mounts[destination].unmount() log_check_call(['umount', self.mount_dir]) del self.mount_dir def add_mount(self, source, destination, opts=[]): + """Associate a mount with this partition + Automatically mounts it + + Args: + source (str,AbstractPartition): The source of the mount + destination (str): The path to the mountpoint + opts (list): Any options that should be passed to the mount command + """ + # Create a new mount object, mount it if the partition is mounted and put it in the mounts dict mount = self.Mount(source, destination, opts) if self.fsm.current == 'mounted': mount.mount(self.mount_dir) self.mounts[destination] = mount def remove_mount(self, destination): + """Remove a mount from this partition + Automatically unmounts it + + Args: + destination (str): The mountpoint path of the mount that should be removed + """ + # Unmount the mount if the partition is mounted and delete it from the mounts dict + # If the mount is already unmounted and the source is a partition, this will raise an exception if self.fsm.current == 'mounted': self.mounts[destination].unmount() del self.mounts[destination] diff --git a/base/fs/partitions/base.py b/base/fs/partitions/base.py index d8544f8..0ab8a8f 100644 --- a/base/fs/partitions/base.py +++ b/base/fs/partitions/base.py @@ -2,7 +2,11 @@ from abstract import AbstractPartition class BasePartition(AbstractPartition): + """Represents a partition that is actually a partition (and not a virtual one like 'Single') + """ + # Override the states of the abstract partition + # A real partition can be mapped and unmapped events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'}, {'name': 'map', 'src': 'unmapped', 'dst': 'mapped'}, {'name': 'format', 'src': 'mapped', 'dst': 'formatted'}, @@ -15,45 +19,87 @@ class 
BasePartition(AbstractPartition): ] def __init__(self, size, filesystem, format_command, previous): + """ + Args: + size (Bytes): Size of the partition + filesystem (str): Filesystem the partition should be formatted with + format_command (list): Optional format command, valid variables are fs, device_path and size + previous (BasePartition): The partition that preceeds this one + """ + # By saving the previous partition we have + # a linked list that partitions can go backwards in to find the first partition. self.previous = previous from common.bytes import Bytes + # Initialize the offset to 0 bytes, may be changed later self.offset = Bytes(0) + # List of flags that parted should put on the partition self.flags = [] super(BasePartition, self).__init__(size, filesystem, format_command) def create(self, volume): + """Creates the partition + + Args: + volume (Volume): The volume to create the partition on + """ self.fsm.create(volume=volume) def get_index(self): + """Gets the index of this partition in the partition map + + Returns: + int. The index of the partition in the partition map + """ if self.previous is None: + # Partitions are 1 indexed return 1 else: + # Recursive call to the previous partition, walking up the chain... return self.previous.get_index() + 1 def get_start(self): + """Gets the starting byte of this partition + + Returns: + Bytes. 
The starting byte of this partition + """ if self.previous is None: + # If there is no previous partition, this partition begins at the offset return self.offset else: + # Get the end of the previous partition and add the offset of this partition return self.previous.get_end() + self.offset def map(self, device_path): + """Maps the partition to a device_path + + Args: + device_path (str): The device patht his partition should be mapped to + """ self.fsm.map(device_path=device_path) def _before_create(self, e): + """Creates the partition + """ from common.tools import log_check_call + # The create command is failry simple, start and end are just Bytes objects coerced into strings create_command = ('mkpart primary {start} {end}' .format(start=str(self.get_start()), end=str(self.get_end()))) + # Create the partition log_check_call(['parted', '--script', '--align', 'none', e.volume.device_path, '--', create_command]) + # Set any flags on the partition for flag in self.flags: log_check_call(['parted', '--script', e.volume.device_path, '--', ('set {idx} {flag} on' .format(idx=str(self.get_index()), flag=flag))]) def _before_map(self, e): + # Set the device path self.device_path = e.device_path def _before_unmap(self, e): + # When unmapped, the device_path ifnromation becomes invalid, so we delete it self.device_path = None diff --git a/base/fs/partitions/gpt.py b/base/fs/partitions/gpt.py index 8dce6dd..b64641a 100644 --- a/base/fs/partitions/gpt.py +++ b/base/fs/partitions/gpt.py @@ -3,12 +3,23 @@ from base import BasePartition class GPTPartition(BasePartition): + """Represents a GPT partition + """ def __init__(self, size, filesystem, format_command, name, previous): + """ + Args: + size (Bytes): Size of the partition + filesystem (str): Filesystem the partition should be formatted with + format_command (list): Optional format command, valid variables are fs, device_path and size + name (str): The name of the partition + previous (BasePartition): The partition that 
preceeds this one + """ self.name = name super(GPTPartition, self).__init__(size, filesystem, format_command, previous) def _before_create(self, e): + # Create the partition and then set the name of the partition afterwards super(GPTPartition, self)._before_create(e) # partition name only works for gpt, for msdos that becomes the part-type (primary, extended, logical) name_command = ('name {idx} {name}' diff --git a/base/fs/partitions/gpt_swap.py b/base/fs/partitions/gpt_swap.py index e5fdc3d..364419e 100644 --- a/base/fs/partitions/gpt_swap.py +++ b/base/fs/partitions/gpt_swap.py @@ -3,8 +3,15 @@ from gpt import GPTPartition class GPTSwapPartition(GPTPartition): + """Represents a GPT swap partition + """ def __init__(self, size, previous): + """ + Args: + size (Bytes): Size of the partition + previous (BasePartition): The partition that preceeds this one + """ super(GPTSwapPartition, self).__init__(size, 'swap', None, 'swap', previous) def _before_format(self, e): diff --git a/base/fs/partitions/msdos.py b/base/fs/partitions/msdos.py index e0f7f62..cb7d96d 100644 --- a/base/fs/partitions/msdos.py +++ b/base/fs/partitions/msdos.py @@ -2,4 +2,6 @@ from base import BasePartition class MSDOSPartition(BasePartition): + """Represents an MS-DOS partition + """ pass diff --git a/base/fs/partitions/msdos_swap.py b/base/fs/partitions/msdos_swap.py index 18c30ff..8c7db46 100644 --- a/base/fs/partitions/msdos_swap.py +++ b/base/fs/partitions/msdos_swap.py @@ -3,8 +3,15 @@ from msdos import MSDOSPartition class MSDOSSwapPartition(MSDOSPartition): + """Represents a MS-DOS swap partition + """ def __init__(self, size, previous): + """ + Args: + size (Bytes): Size of the partition + previous (BasePartition): The partition that preceeds this one + """ super(MSDOSSwapPartition, self).__init__(size, 'swap', None, previous) def _before_format(self, e): diff --git a/base/fs/partitions/single.py b/base/fs/partitions/single.py index 828ba91..7c0f9da 100644 --- 
a/base/fs/partitions/single.py +++ b/base/fs/partitions/single.py @@ -2,7 +2,15 @@ from abstract import AbstractPartition class SinglePartition(AbstractPartition): + """Represents a single virtual partition on an unpartitioned volume + """ def get_start(self): + """Gets the starting byte of this partition + + Returns: + Bytes. The starting byte of this partition + """ from common.bytes import Bytes + # On an unpartitioned volume there is no offset and no previous partition return Bytes(0) diff --git a/base/fs/partitions/unformatted.py b/base/fs/partitions/unformatted.py index bb8e343..271f35f 100644 --- a/base/fs/partitions/unformatted.py +++ b/base/fs/partitions/unformatted.py @@ -2,11 +2,20 @@ from base import BasePartition class UnformattedPartition(BasePartition): + """Represents an unformatted partition + It cannot be mounted + """ + # The states for our state machine. It can only be mapped, not mounted. events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'}, {'name': 'map', 'src': 'unmapped', 'dst': 'mapped'}, {'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'}, ] def __init__(self, size, previous): + """ + Args: + size (Bytes): Size of the partition + previous (BasePartition): The partition that preceeds this one + """ super(UnformattedPartition, self).__init__(size, None, None, previous) diff --git a/base/fs/volume.py b/base/fs/volume.py index 9f61541..34a9720 100644 --- a/base/fs/volume.py +++ b/base/fs/volume.py @@ -6,9 +6,13 @@ from partitionmaps.none import NoPartitions class Volume(FSMProxy): + """Represents an abstract volume. + This class is a finite state machine and represents the state of the real volume. 
+ """ __metaclass__ = ABCMeta + # States this volume can be in events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'detached'}, {'name': 'attach', 'src': 'detached', 'dst': 'attached'}, {'name': 'link_dm_node', 'src': 'attached', 'dst': 'linked'}, @@ -18,33 +22,76 @@ class Volume(FSMProxy): ] def __init__(self, partition_map): + """ + Args: + partition_map (PartitionMap): The partition map for the volume + """ + # Path to the volume self.device_path = None self.real_device_path = None + # The partition map self.partition_map = partition_map + # The size of the volume as reported by the partition map self.size = self.partition_map.get_total_size() + # Before detaching, check that nothing would block the detachment callbacks = {'onbeforedetach': self._check_blocking} if isinstance(self.partition_map, NoPartitions): + # When the volume has no partitions, the virtual root partition path is equal to that of the volume + # Update that path whenever the path to the volume changes def set_dev_path(e): self.partition_map.root.device_path = self.device_path callbacks['onafterattach'] = set_dev_path callbacks['onlink_dm_node'] = set_dev_path callbacks['onunlink_dm_node'] = set_dev_path + # Create the configuration for our finite state machine cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': callbacks} super(Volume, self).__init__(cfg) def _after_create(self, e): + """ + Args: + e (_e_obj): Event object containing arguments to create() + """ if isinstance(self.partition_map, NoPartitions): + # When the volume has no partitions, the virtual root partition + # is essentially created when the volume is created, forward that creation event. 
self.partition_map.root.create() def _check_blocking(self, e): + """Checks whether the volume is blocked + + Args: + e (_e_obj): Event object containing arguments to create() + + Raises: + VolumeError + """ + # Only the partition map can block the volume if self.partition_map.is_blocking(): raise VolumeError('The partitionmap prevents the detach procedure') def _before_link_dm_node(self, e): + """Links the volume using the device mapper + This allows us to create a 'window' into the volume that acts like a volum in itself. + Mainly it is used to fool grub into thinking that it is working with a real volume, + rather than a loopback device or a network block device. + + Args: + e (_e_obj): Event object containing arguments to create() + Arguments are: + logical_start_sector (int): The sector the volume should start at in the new volume + start_sector (int): The offset at which the volume should begin to be mapped in the new volume + sectors (int): The number of sectors that should be mapped + Read more at: http://manpages.debian.org/cgi-bin/man.cgi?query=dmsetup&apropos=0&sektion=0&manpath=Debian+7.0+wheezy&format=html&locale=en + + Raises: + VolumeError + """ import os.path from common.fs import get_partitions + # Fetch information from /proc/partitions proc_partitions = get_partitions() device_name = os.path.basename(self.device_path) device_partition = proc_partitions[device_name] @@ -55,8 +102,10 @@ class Volume(FSMProxy): # The offset at which the volume should begin to be mapped in the new volume start_sector = getattr(e, 'start_sector', 0) + # The number of sectors that should be mapped sectors = getattr(e, 'sectors', int(self.size / 512) - start_sector) + # This is the table we send to dmsetup, so that it may create a decie mapping for us. 
table = ('{log_start_sec} {sectors} linear {major}:{minor} {start_sec}' .format(log_start_sec=logical_start_sector, sectors=sectors, @@ -65,6 +114,7 @@ class Volume(FSMProxy): start_sec=start_sector)) import string import os.path + # Figure out the device letter and path for letter in string.ascii_lowercase: dev_name = 'vd' + letter dev_path = os.path.join('/dev/mapper', dev_name) @@ -76,12 +126,21 @@ class Volume(FSMProxy): if not hasattr(self, 'dm_node_name'): raise VolumeError('Unable to find a free block device path for mounting the bootstrap volume') + # Create the device mapping log_check_call(['dmsetup', 'create', self.dm_node_name], table) + # Update the device_path but remember the old one for when we unlink the volume again self.unlinked_device_path = self.device_path self.device_path = self.dm_node_path def _before_unlink_dm_node(self, e): + """Unlinks the device mapping + + Args: + e (_e_obj): Event object containing arguments to create() + """ log_check_call(['dmsetup', 'remove', self.dm_node_name]) + # Delete the no longer valid information del self.dm_node_name del self.dm_node_path + # Reset the device_path self.device_path = self.unlinked_device_path diff --git a/base/log.py b/base/log.py index 869f878..ece32e7 100644 --- a/base/log.py +++ b/base/log.py @@ -1,7 +1,20 @@ +"""This module holds functions and classes responsible for formatting the log output +both to a file and to the console. +.. module:: log +""" import logging def get_logfile_path(manifest_path): + """Returns the path to a logfile given a manifest + The logfile name is constructed from the current timestamp and the basename of the manifest + + Args: + manifest_path (str): The path to the manifest + + Returns: + str. 
The path to the logfile + """ import os.path from datetime import datetime @@ -13,17 +26,31 @@ def get_logfile_path(manifest_path): def setup_logger(logfile=None, debug=False): + """Sets up the python logger to log to both a file and the console + + Args: + logfile (str): Path to a logfile + debug (bool): Whether to log debug output to the console + """ root = logging.getLogger() + # Make sure all logging statements are processed by our handlers, they decide the log level root.setLevel(logging.NOTSET) + # Create a file log handler file_handler = logging.FileHandler(logfile) + # Absolute timestamps are rather useless when bootstrapping, it's much more interesting + # to see how long things take, so we log in a relative format instead file_handler.setFormatter(FileFormatter('[%(relativeCreated)s] %(levelname)s: %(message)s')) + # The file log handler always logs everything file_handler.setLevel(logging.DEBUG) root.addHandler(file_handler) + # Create a console log handler import sys console_handler = logging.StreamHandler(sys.stderr) + # We want to colorize the output to the console, so we add a formatter console_handler.setFormatter(ConsoleFormatter()) + # Set the log level depending on the debug argument if debug: console_handler.setLevel(logging.DEBUG) else: @@ -32,6 +59,8 @@ def setup_logger(logfile=None, debug=False): class ConsoleFormatter(logging.Formatter): + """Formats log statements for the console + """ level_colors = {logging.ERROR: 'red', logging.WARNING: 'magenta', logging.INFO: 'blue', @@ -39,11 +68,15 @@ class ConsoleFormatter(logging.Formatter): def format(self, record): if(record.levelno in self.level_colors): + # Colorize the message if we have a color for it (DEBUG has no color) from termcolor import colored record.msg = colored(record.msg, self.level_colors[record.levelno]) return super(ConsoleFormatter, self).format(record) class FileFormatter(logging.Formatter): + """Formats log statements for output to file + Currently this is just a stub + """ 
def format(self, record): return super(FileFormatter, self).format(record) diff --git a/base/main.py b/base/main.py index 8d28c44..add0301 100644 --- a/base/main.py +++ b/base/main.py @@ -1,20 +1,34 @@ +"""Main module containing all the setup necessary for running the bootstrapping process +.. module:: main +""" + import logging log = logging.getLogger(__name__) def main(): + """Main function for invoking the bootstrap process + + Raises: + Exception + """ + # Get the commandline arguments import os args = get_args() # Require root privileges, except when doing a dry-run where they aren't needed if os.geteuid() != 0 and not args.dry_run: raise Exception('This program requires root privileges.') + # Setup logging import log logfile = log.get_logfile_path(args.manifest) log.setup_logger(logfile=logfile, debug=args.debug) + # Everything has been set up, begin the bootstrapping process run(args) def get_args(): + """Creates an argument parser and returns the arguments it has parsed + """ from argparse import ArgumentParser parser = ArgumentParser(description='Bootstrap Debian for the cloud.') parser.add_argument('--debug', action='store_true', @@ -28,31 +42,57 @@ def get_args(): def run(args): + """Runs the bootstrapping process + + Args: + args (dict): Dictionary of arguments from the commandline + """ + # Load the manifest from manifest import Manifest manifest = Manifest(args.manifest) + # Get the tasklist from tasklist import TaskList tasklist = TaskList() + # 'resolve_tasks' is the name of the function to call on the provider and plugins tasklist.load('resolve_tasks', manifest) + # Create the bootstrap information object that'll be used throughout the bootstrapping process from bootstrapinfo import BootstrapInformation bootstrap_info = BootstrapInformation(manifest=manifest, debug=args.debug) try: + # Run all the tasks the tasklist has gathered tasklist.run(info=bootstrap_info, dry_run=args.dry_run) + # We're done! 
:-) log.info('Successfully completed bootstrapping') except (Exception, KeyboardInterrupt) as e: + # When an error occurs, log it and begin rollback log.exception(e) if args.pause_on_error: + # The --pause-on-error is useful when the user wants to inspect the volume before rollback raw_input('Press Enter to commence rollback') log.error('Rolling back') + # Create a new tasklist to gather the necessary tasks for rollback rollback_tasklist = TaskList() + # Create a useful little function for the provider and plugins to use, + # when figuring out what tasks should be added to the rollback list. def counter_task(task, counter): + """counter_task() adds the second argument to the rollback tasklist + if the first argument is present in the list of completed tasks + + Args: + task (Task): The task to look for in the completed tasks list + counter (Task): The task to add to the rollback tasklist + """ if task in tasklist.tasks_completed and counter not in tasklist.tasks_completed: rollback_tasklist.tasks.add(counter) + # Ask the provider and plugins for tasks they'd like to add to the rollback tasklist + # Any additional arguments beyond the first two are passed directly to the provider and plugins rollback_tasklist.load('resolve_rollback_tasks', manifest, counter_task) + # Run the rollback tasklist rollback_tasklist.run(info=bootstrap_info, dry_run=args.dry_run) log.info('Successfully completed rollback') diff --git a/base/manifest.py b/base/manifest.py index a8bd61e..ecd4537 100644 --- a/base/manifest.py +++ b/base/manifest.py @@ -1,22 +1,48 @@ +"""The Manifest module contains the manifest that providers and plugins use +to determine which tasks should be added to the tasklist, what arguments various +invocations should have etc.. +.. 
module:: manifest +""" from common.tools import load_json import logging log = logging.getLogger(__name__) class Manifest(object): + """This class holds all the information that providers and plugins need + to perform the bootstrapping process. All actions that are taken originate from + here. The manifest shall not be modified after it has been loaded. + Currently, immutability is not enforced and it would require a fair amount of code + to enforce it, instead we just rely on tasks behaving properly. + """ def __init__(self, path): + """Initializer: Given a path we load, validate and parse the manifest. + + Args: + path (str): The path to the manifest + """ self.path = path self.load() self.validate() self.parse() def load(self): + """Loads the manifest. + This function not only reads the manifest but also loads the specified provider and plugins. + Once they are loaded, the initialize() function is called on each of them (if it exists). + The provider must have an initialize function. + """ + # Load the manifest JSON using the loader in common.tools + # It strips comments (which are invalid in strict json) before loading the data. 
self.data = load_json(self.path) + # Get the provider name from the manifest and load the corresponding module provider_modname = 'providers.{provider}'.format(provider=self.data['provider']) log.debug('Loading provider `{modname}\''.format(modname=provider_modname)) + # Create a modules dict that contains the loaded provider and plugins self.modules = {'provider': __import__(provider_modname, fromlist=['providers']), 'plugins': [], } + # Run through all the plugins mentioned in the manifest and load them if 'plugins' in self.data: for plugin_name, plugin_data in self.data['plugins'].iteritems(): modname = 'plugins.{plugin}'.format(plugin=plugin_name) @@ -24,37 +50,62 @@ class Manifest(object): plugin = __import__(modname, fromlist=['plugins']) self.modules['plugins'].append(plugin) + # Run the initialize function on the provider and plugins self.modules['provider'].initialize() for module in self.modules['plugins']: + # Plugins are not required to have an initialize function init = getattr(module, 'initialize', None) if callable(init): init() def validate(self): + """Validates the manifest using the base, provider and plugin validation functions. + Plugins are not required to have a validate_manifest function + """ from . import validate_manifest + # Validate the manifest with the base validation function in __init__ validate_manifest(self.data, self.schema_validator, self.validation_error) + # Run the provider validation self.modules['provider'].validate_manifest(self.data, self.schema_validator, self.validation_error) + # Run the validation function for any plugin that has it for plugin in self.modules['plugins']: validate = getattr(plugin, 'validate_manifest', None) if callable(validate): validate(self.data, self.schema_validator, self.validation_error) def parse(self): + """Parses the manifest. + Well... "parsing" is a big word. 
+ The function really just sets up some convenient attributes so that tasks + don't have to access information with info.manifest.data['section'] + but can do it with info.manifest.section. + """ self.provider = self.data['provider'] self.bootstrapper = self.data['bootstrapper'] self.image = self.data['image'] self.volume = self.data['volume'] self.system = self.data['system'] + # The packages and plugins section is not required self.packages = self.data['packages'] if 'packages' in self.data else {} self.plugins = self.data['plugins'] if 'plugins' in self.data else {} def load_json(self, path): + """Loads JSON. Unused and will be removed. + Use common.tools.load_json instead + """ import json from minify_json import json_minify with open(path) as stream: return json.loads(json_minify(stream.read(), False)) def schema_validator(self, data, schema_path): + """This convenience function is passed around to all the validation functions + so that they may run a json-schema validation by giving it the data and a path to the schema. + + Args: + data (dict): Data to validate (normally the manifest data) + schema_path (str): Path to the json-schema to use for validation + """ import jsonschema schema = load_json(schema_path) try: @@ -63,5 +114,12 @@ class Manifest(object): self.validation_error(e.message, e.path) def validation_error(self, message, json_path=None): + """This function is passed to all validation functions so that they may + raise a validation error because a custom validation of the manifest failed. + + Args: + message (str): Message to user about the error + json_path (list): A path to the location in the manifest where the error occurred + """ from common.exceptions import ManifestError raise ManifestError(message, self.path, json_path) diff --git a/base/phase.py b/base/phase.py index 9acf825..12fd5d2 100644 --- a/base/phase.py +++ b/base/phase.py @@ -1,16 +1,33 @@ class Phase(object): + """The Phase class represents a phase a task may be in. 
+ It has no function other than to act as an anchor in the task graph. + All phases are instantiated in common.phases + """ def __init__(self, name, description): + # The name of the phase self.name = name + # The description of the phase (currently not used anywhere) self.description = description def pos(self): + """Gets the position of the phase + Returns: + int. The positional index of the phase in relation to the other phases + """ from common.phases import order return next(i for i, phase in enumerate(order) if phase is self) def __cmp__(self, other): + """Compares the phase order in relation to the other phases + """ return self.pos() - other.pos() def __str__(self): + """String representation of the phase, the name suffices + + Returns: + string. + """ return self.name diff --git a/base/pkg/exceptions.py b/base/pkg/exceptions.py index 9437f9c..dc7534b 100644 --- a/base/pkg/exceptions.py +++ b/base/pkg/exceptions.py @@ -1,8 +1,12 @@ class PackageError(Exception): + """Raised when an error occurrs while handling the packageslist + """ pass class SourceError(Exception): + """Raised when an error occurs while handling the sourceslist + """ pass diff --git a/base/pkg/packagelist.py b/base/pkg/packagelist.py index 06231d4..23e596d 100644 --- a/base/pkg/packagelist.py +++ b/base/pkg/packagelist.py @@ -2,38 +2,84 @@ from exceptions import PackageError class PackageList(object): + """Represents a list of packages + """ class Remote(object): + """A remote package with an optional target + """ def __init__(self, name, target): + """ + Args: + name (str): The name of the package + target (str): The name of the target release + """ self.name = name self.target = target def __str__(self): + """Converts the package into somehting that apt-get install can parse + Returns: + string. 
+ """ if self.target is None: return self.name else: return '{name}/{target}'.format(name=self.name, target=self.target) class Local(object): + """A local package + """ def __init__(self, path): + """ + Args: + path (str): The path to the local package + """ self.path = path def __str__(self): + """ + Returns: + string. The path to the local package + """ return self.path def __init__(self, manifest_vars, source_lists): + """ + Args: + manifest_vars (dict): The manifest variables + source_lists (SourceLists): The sourcelists for apt + """ self.manifest_vars = manifest_vars self.source_lists = source_lists + # The default_target is the release we are bootstrapping self.default_target = '{system.release}'.format(**self.manifest_vars) + # The list of packages that should be installed, this is not a set. + # We want to preserve the order in which the packages were added so that local + # packages may be installed in the correct order. self.install = [] + # A function that filters the install list and only returns remote packages self.remote = lambda: filter(lambda x: isinstance(x, self.Remote), self.install) def add(self, name, target=None): + """Adds a package to the install list + + Args: + name (str): The name of the package to install, may contain manifest vars references + target (str): The name of the target release for the package, may contain manifest vars references + + Raises: + PackageError + """ name = name.format(**self.manifest_vars) if target is not None: target = target.format(**self.manifest_vars) + # Check if the package has already been added. + # If so, make sure it's the same target and raise a PackageError otherwise package = next((pkg for pkg in self.remote() if pkg.name == name), None) if package is not None: + # It's the same target if the target names match or one of the targets is None + # and the other is the default target. 
same_target = package.target == target same_target = same_target or package.target is None and target == self.default_target same_target = same_target or package.target == self.default_target and target is None @@ -42,8 +88,10 @@ class PackageList(object): 'but with target release `{target}\' instead of `{add_target}\'' .format(name=name, target=package.target, add_target=target)) raise PackageError(msg) + # The package has already been added, skip the checks below return + # Check if the target exists in the sources list, raise a PackageError if not check_target = target if check_target is None: check_target = self.default_target @@ -51,8 +99,17 @@ class PackageList(object): msg = ('The target release {target} was not found in the sources list').format(target=check_target) raise PackageError(msg) + # Note that we maintain the target value even if it is none. + # This allows us to preserve the semantics of the default target when calling apt-get install + # Why? Try installing nfs-client/wheezy, you can't. It's a virtual package for which you cannot define + # a target release. Only `apt-get install nfs-client` works. 
self.install.append(self.Remote(name, target)) def add_local(self, package_path): + """Adds a local package to the installation list + + Args: + package_path (str): Path to the local package, may contain manifest vars references + """ package_path = package_path.format(**self.manifest_vars) self.install.append(self.Local(package_path)) diff --git a/base/pkg/sourceslist.py b/base/pkg/sourceslist.py index 7dc6486..0a50243 100644 --- a/base/pkg/sourceslist.py +++ b/base/pkg/sourceslist.py @@ -1,12 +1,27 @@ class SourceLists(object): + """Represents a list of sources lists for apt + """ def __init__(self, manifest_vars): + """ + Args: + manifest_vars (dict): The manifest variables + """ + # A dictionary with the name of the file in sources.list.d as the key + # The values are lists of Source objects self.sources = {} + # Save the manifest variables, we need them later on self.manifest_vars = manifest_vars def add(self, name, line): + """Adds a source to the apt sources list + + Args: + name (str): Name of the file in sources.list.d, may contain manifest vars references + line (str): The line for the source file, may contain manifest vars references + """ name = name.format(**self.manifest_vars) line = line.format(**self.manifest_vars) if name not in self.sources: @@ -14,7 +29,16 @@ class SourceLists(object): self.sources[name].append(Source(line)) def target_exists(self, target): + """Checks whether the target exists in the sources list + + Args: + target (str): Name of the target to check for, may contain manifest vars references + + Returns: + bool.
Whether the target exists + """ target = target.format(**self.manifest_vars) + # Run through all the sources and return True if the target exists for lines in self.sources.itervalues(): if target in (source.distribution for source in lines): return True @@ -22,8 +46,20 @@ class Source(object): + """Represents a single source line + """ def __init__(self, line): + """ + Args: + line (str): An apt source line + + Raises: + SourceError + """ + # Parse the source line and populate the class attributes with it + # The format is taken from `man sources.list` + # or: http://manpages.debian.org/cgi-bin/man.cgi?sektion=5&query=sources.list&apropos=0&manpath=sid&locale=en import re regexp = re.compile('^(?P<type>deb|deb-src)\s+' '(\[\s*(?P<options>.+\S)?\s*\]\s+)?' @@ -45,6 +81,12 @@ class Source(object): self.components = re.sub(' +', ' ', match['components']).split(' ') def __str__(self): + """Convert the object into a source line + This is pretty much the reverse of what we're doing in the initialization function. + + Returns: + string. + """ options = '' if len(self.options) > 0: options = ' [{options}]'.format(options=' '.join(self.options)) diff --git a/base/task.py b/base/task.py index e980477..852106f 100644 --- a/base/task.py +++ b/base/task.py @@ -1,17 +1,37 @@ class Task(object): + """The task class represents a task that can be run. + It is merely a wrapper for the run function and should never be instantiated. + """ + # The phase this task is located in. phase = None + # List of tasks that should run before this task is run predecessors = [] + # List of tasks that should run after this task has run successors = [] class __metaclass__(type): + """Metaclass to control how the class is coerced into a string + """ def __repr__(cls): + """ + Returns: + string. + """ return '{module}.{task}'.format(module=cls.__module__, task=cls.__name__) def __str__(cls): + """ + Returns: + string.
+ """ return repr(cls) @classmethod def run(cls, info): + """The run function, all work is done inside this function + Args: + info (BootstrapInformation): The bootstrap info object + """ pass diff --git a/base/tasklist.py b/base/tasklist.py index 216c466..73c70ae 100644 --- a/base/tasklist.py +++ b/base/tasklist.py @@ -1,35 +1,68 @@ +"""The tasklist module contains the TaskList class. +.. module:: tasklist +""" + from common.exceptions import TaskListError import logging log = logging.getLogger(__name__) class TaskList(object): + """The tasklist class aggregates all tasks that should be run + and orders them according to their dependencies. + """ def __init__(self): self.tasks = set() self.tasks_completed = [] def load(self, function, manifest, *args): + """Calls 'function' on the provider and all plugins that have been loaded by the manifest. + Any additional arguments are passed directly to 'function'. + The function that is called shall accept the taskset as its first argument and the manifest + as its second argument. 
+ + Args: + function (str): Name of the function to call + manifest (Manifest): The manifest + *args: Additional arguments that should be passed to the function that is called + """ + # Call 'function' on the provider getattr(manifest.modules['provider'], function)(self.tasks, manifest, *args) for plugin in manifest.modules['plugins']: + # Plugins are not required to have whatever function we call fn = getattr(plugin, function, None) if callable(fn): fn(self.tasks, manifest, *args) def run(self, info={}, dry_run=False): + """Converts the taskgraph into a list and runs all tasks in that list + + Args: + info (dict): The bootstrap information object + dry_run (bool): Whether to actually run the tasks or simply step through them + """ + # Create a list for us to run task_list = self.create_list() + # Output the tasklist log.debug('Tasklist:\n\t{list}'.format(list='\n\t'.join(map(repr, task_list)))) for task in task_list: + # Tasks are not required to have a description if hasattr(task, 'description'): log.info(task.description) else: + # If there is no description, simply coerce the task into a string and print its name log.info('Running {task}'.format(task=task)) if not dry_run: + # Run the task task.run(info) + # Remember which tasks have been run for later use (e.g. when rolling back, because of an error) self.tasks_completed.append(task) def create_list(self): + """Creates a list of all the tasks that should be run.
+ """ from common.phases import order # Get a hold of all tasks tasks = self.get_all_tasks() @@ -52,9 +85,11 @@ class TaskList(object): # Map the successors to the task graph[task] = successors + # Use the strongly connected components algorithm to check for cycles in our task graph components = self.strongly_connected_components(graph) cycles_found = 0 for component in components: + # Node of 1 is also a strongly connected component but hardly a cycle, so we filter them out if len(component) > 1: cycles_found += 1 log.debug('Cycle: {list}\n'.format(list=', '.join(map(repr, component)))) @@ -72,6 +107,11 @@ class TaskList(object): return sorted_tasks def get_all_tasks(self): + """Gets a list of all task classes in the package + + Returns: + list. A list of all tasks in the package + """ # Get a generator that returns all classes in the package classes = self.get_all_classes('..') @@ -81,8 +121,18 @@ class TaskList(object): return issubclass(obj, Task) and obj is not Task return filter(is_task, classes) # Only return classes that are tasks - # Given a path, retrieve all the classes in it def get_all_classes(self, path=None): + """ Given a path to a package, this function retrieves all the classes in it + + Args: + path (str): Path to the package + + Returns: + generator. A generator that yields classes + + Raises: + Exception + """ import pkgutil import importlib import inspect @@ -99,13 +149,28 @@ class TaskList(object): yield obj def check_ordering(self, task): + """Checks the ordering of a task in relation to other tasks and their phases + This function checks for a subset of what the strongly connected components algorithm does, + but can deliver a more precise error message, namely that there is a conflict between + what a task has specified as its predecessors or successors and in which phase it is placed. 
+ + Args: + task (Task): The task to check the ordering for + + Raises: + TaskListError + """ for successor in task.successors: + # Run through all successors and check whether the phase of the task + # comes before the phase of a successor if successor.phase > successor.phase: msg = ("The task {task} is specified as running before {other}, " "but its phase '{phase}' lies after the phase '{other_phase}'" .format(task=task, other=successor, phase=task.phase, other_phase=successor.phase)) raise TaskListError(msg) for predecessor in task.predecessors: + # Run through all predecessors and check whether the phase of the task + # comes after the phase of a predecessor if task.phase < predecessor.phase: msg = ("The task {task} is specified as running after {other}, " "but its phase '{phase}' lies before the phase '{other_phase}'" @@ -113,9 +178,15 @@ class TaskList(object): raise TaskListError(msg) def strongly_connected_components(self, graph): - # Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py - # Find the strongly connected components in a graph using Tarjan's algorithm. - # graph should be a dictionary mapping node names to lists of successor nodes. + """Find the strongly connected components in a graph using Tarjan's algorithm. + Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py + + Args: + graph (dict): mapping of tasks to lists of successor tasks + + Returns: + list. List of tuples that are strongly connected components + """ result = [] stack = [] @@ -147,7 +218,15 @@ class TaskList(object): return result def topological_sort(self, graph): - # Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py + """Runs a topological sort on a graph + Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py + + Args: + graph (dict): mapping of tasks to lists of successor tasks + + Returns: + list.
A list of all tasks in the graph sorted according to their dependencies + """ count = {} for node in graph: count[node] = 0 diff --git a/plugins/puppet/tasks.py b/plugins/puppet/tasks.py index 369979e..e2f0e22 100644 --- a/plugins/puppet/tasks.py +++ b/plugins/puppet/tasks.py @@ -99,3 +99,13 @@ class ApplyPuppetManifest(Task): from common.tools import sed_i hosts_path = os.path.join(info.root, 'etc/hosts') sed_i(hosts_path, '127.0.0.1\s*{hostname}\n?'.format(hostname=hostname), '') + + +class EnableAgent(Task): + description = 'Enabling the puppet agent' + phase = phases.system_modification + + @classmethod + def run(cls, info): + puppet_defaults = os.path.join(info.root, 'etc/defaults/puppet') + sed_i(puppet_defaults, 'START=no', 'START=yes') From e05072c43bb8e15987870940bc669867276266c3 Mon Sep 17 00:00:00 2001 From: Tiago Ilieve Date: Mon, 24 Mar 2014 09:44:57 -0300 Subject: [PATCH 22/22] zerofree is used against partitions, not devices --- plugins/minimize_size/tasks.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/minimize_size/tasks.py b/plugins/minimize_size/tasks.py index b04f301..161cefd 100644 --- a/plugins/minimize_size/tasks.py +++ b/plugins/minimize_size/tasks.py @@ -62,15 +62,15 @@ class AddRequiredCommands(Task): class Zerofree(Task): - description = 'Zeroing unused blocks on the volume' + description = 'Zeroing unused blocks on the root partition' phase = phases.volume_unmounting - predecessors = [filesystem.UnmountRoot, partitioning.UnmapPartitions] - successors = [volume.Detach] + predecessors = [filesystem.UnmountRoot] + successors = [partitioning.UnmapPartitions, volume.Detach] @classmethod def run(cls, info): from common.tools import log_check_call - log_check_call(['zerofree', info.volume.device_path]) + log_check_call(['zerofree', info.volume.partition_map.root.device_path]) class ShrinkVolume(Task):