diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 9e0498c..8feabfe 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,130 +5,130 @@ Changelog 2016-06-02 ---------- Peter Wagner - * Added ec2_publish plugin + * Added ec2_publish plugin 2016-06-02 ---------- Zach Marano: - * Fix expand-root script to work with newer version of growpart (in jessie-backports and beyond). - * Overhaul Google Compute Engine image build. - * Add support for Google Cloud repositories. - * Google Cloud SDK install uses a deb package from a Google Cloud repository. - * Google Compute Engine guest software is installed from a Google Cloud repository. - * Google Compute Engine guest software for Debian 8 is updated to new refactor. - * Google Compute Engine wheezy and wheezy-backports manifests are deprecated. + * Fix expand-root script to work with newer version of growpart (in jessie-backports and beyond). + * Overhaul Google Compute Engine image build. + * Add support for Google Cloud repositories. + * Google Cloud SDK install uses a deb package from a Google Cloud repository. + * Google Compute Engine guest software is installed from a Google Cloud repository. + * Google Compute Engine guest software for Debian 8 is updated to new refactor. + * Google Compute Engine wheezy and wheezy-backports manifests are deprecated. 
2016-03-03 ---------- Anders Ingemann: - * Rename integration tests to system tests + * Rename integration tests to system tests 2016-02-23 ---------- Nicolas Braud-Santoni: - * #282, #290: Added 'debconf' plugin - * #290: Relaxed requirements on plugins manifests + * #282, #290: Added 'debconf' plugin + * #290: Relaxed requirements on plugins manifests 2016-02-10 ---------- Manoj Srivastava: - * #252: Added support for password and static pubkey auth + * #252: Added support for password and static pubkey auth 2016-02-06 ---------- Tiago Ilieve: - * Added Oracle Compute Cloud provider - * #280: Declared Squeeze as unsupported + * Added Oracle Compute Cloud provider + * #280: Declared Squeeze as unsupported 2016-01-14 ---------- Jesse Szwedko: - * #269: EC2: Added growpart script extension + * #269: EC2: Added growpart script extension 2016-01-10 ---------- Clark Laughlin: - * Enabled support for KVM on arm64 + * Enabled support for KVM on arm64 2015-12-19 ---------- Tim Sattarov: - * #263: Ignore loopback interface in udev rules (reduces startup of networking by a factor of 10) + * #263: Ignore loopback interface in udev rules (reduces startup of networking by a factor of 10) 2015-12-13 ---------- Anders Ingemann: - * Docker provider implemented (including integration testing harness & tests) - * minimize_size: Added various size reduction options for dpkg and apt - * Removed image section in manifest. - Provider specific options have been moved to the provider section. - The image name is now specified on the top level of the manifest with "name" - * Provider docs have been greatly improved. All now list their special options. - * All manifest option documentation is now accompanied by an example. - * Added documentation for the integration test providers + * Docker provider implemented (including integration testing harness & tests) + * minimize_size: Added various size reduction options for dpkg and apt + * Removed image section in manifest. 
+ Provider specific options have been moved to the provider section. + The image name is now specified on the top level of the manifest with "name" + * Provider docs have been greatly improved. All now list their special options. + * All manifest option documentation is now accompanied by an example. + * Added documentation for the integration test providers 2015-11-13 ---------- Marcin Kulisz: - * Exclude docs from binary package + * Exclude docs from binary package 2015-10-20 ---------- Max Illfelder: - * Remove support for the GCE Debian mirror + * Remove support for the GCE Debian mirror 2015-10-14 ---------- Anders Ingemann: - * Bootstrap azure images directly to VHD + * Bootstrap azure images directly to VHD 2015-09-28 ---------- Rick Wright: - * Change GRUB_HIDDEN_TIMEOUT to 0 from true and set GRUB_HIDDEN_TIMEOUT_QUIET to true. + * Change GRUB_HIDDEN_TIMEOUT to 0 from true and set GRUB_HIDDEN_TIMEOUT_QUIET to true. 2015-09-24 ---------- Rick Wright: - * Fix a problem with Debian 8 on GCE with >2TB disks + * Fix a problem with Debian 8 on GCE with >2TB disks 2015-09-04 ---------- Emmanuel Kasper: - * Set Virtualbox memory to 512 MB + * Set Virtualbox memory to 512 MB 2015-08-07 ---------- Tiago Ilieve: - * Change default Debian mirror + * Change default Debian mirror 2015-08-06 ---------- Stephen A. 
Zarkos: - * Azure: Change default shell in /etc/default/useradd for Azure images - * Azure: Add boot parameters to Azure config to ease local debugging - * Azure: Add apt import for backports - * Azure: Comment GRUB_HIDDEN_TIMEOUT so we can set GRUB_TIMEOUT - * Azure: Wheezy images use wheezy-backports kernel by default - * Azure: Change Wheezy image to use single partition - * Azure: Update WALinuxAgent to use 2.0.14 - * Azure: Make sure we can override grub.ConfigureGrub for Azure images - * Azure: Add console=tty0 to see kernel/boot messsages on local console - * Azure: Set serial port speed to 115200 - * Azure: Fix error with applying azure/assets/udev.diff + * Azure: Change default shell in /etc/default/useradd for Azure images + * Azure: Add boot parameters to Azure config to ease local debugging + * Azure: Add apt import for backports + * Azure: Comment GRUB_HIDDEN_TIMEOUT so we can set GRUB_TIMEOUT + * Azure: Wheezy images use wheezy-backports kernel by default + * Azure: Change Wheezy image to use single partition + * Azure: Update WALinuxAgent to use 2.0.14 + * Azure: Make sure we can override grub.ConfigureGrub for Azure images + * Azure: Add console=tty0 to see kernel/boot messsages on local console + * Azure: Set serial port speed to 115200 + * Azure: Fix error with applying azure/assets/udev.diff 2015-07-30 ---------- James Bromberger: - * AWS: Support multiple ENI - * AWS: PVGRUB AKIs for Frankfurt region + * AWS: Support multiple ENI + * AWS: PVGRUB AKIs for Frankfurt region 2015-06-29 ---------- Alex Adriaanse: - * Fix DKMS kernel version error - * Add support for Btrfs - * Add EC2 Jessie HVM manifest + * Fix DKMS kernel version error + * Add support for Btrfs + * Add EC2 Jessie HVM manifest 2015-05-08 ---------- @@ -138,143 +138,143 @@ Alexandre Derumier: 2015-05-02 ---------- Anders Ingemann: - * Fix #32: Add image_commands example - * Fix #99: rename image_commands to commands - * Fix #139: Vagrant / Virtualbox provider should set ostype when 32 
bits selected - * Fix #204: Create a new phase where user modification tasks can run + * Fix #32: Add image_commands example + * Fix #99: rename image_commands to commands + * Fix #139: Vagrant / Virtualbox provider should set ostype when 32 bits selected + * Fix #204: Create a new phase where user modification tasks can run 2015-04-29 ---------- Anders Ingemann: - * Fix #104: Don't verify default target when adding packages - * Fix #217: Implement get_version() function in common.tools + * Fix #104: Don't verify default target when adding packages + * Fix #217: Implement get_version() function in common.tools 2015-04-28 ---------- Jonh Wendell: - * root_password: Enable SSH root login + * root_password: Enable SSH root login 2015-04-27 ---------- John Kristensen: - * Add authentication support to the apt proxy plugin + * Add authentication support to the apt proxy plugin 2015-04-25 ---------- Anders Ingemann (work started 2014-08-31, merged on 2015-04-25): - * Introduce `remote bootstrapping `__ - * Introduce `integration testing `__ (for VirtualBox and EC2) - * Merge the end-user documentation into the sphinx docs - (plugin & provider docs are now located in their respective folders as READMEs) - * Include READMEs in sphinx docs and transform their links - * Docs for integration testing - * Document the remote bootstrapping procedure - * Add documentation about the documentation - * Add list of supported builds to the docs - * Add html output to integration tests - * Implement PR #201 by @jszwedko (bump required euca2ools version) - * grub now works on jessie - * extlinux is now running on jessie - * Issue warning when specifying pre/successors across phases (but still error out if it's a conflict) - * Add salt dependencies in the right phase - * extlinux now works with GPT on HVM instances - * Take @ssgelm's advice in #155 and copy the mount table -- df warnings no more - * Generally deny installing grub on squeeze (too much of a hassle to get working, PRs 
welcome) - * Add 1 sector gap between partitions on GPT - * Add new task: DeterminKernelVersion, this can potentially fix a lot of small problems - * Disable getty processes on jessie through logind config - * Partition volumes by sectors instead of bytes - This allows for finer grained control over the partition sizes and gaps - Add new Sectors unit, enhance Bytes unit, add unit tests for both - * Don't require qemu for raw volumes, use `truncate` instead - * Fix #179: Disabling getty processes task fails half the time - * Split grub and extlinux installs into separate modules - * Fix extlinux config for squeeze - * Fix #136: Make extlinux output boot messages to the serial console - * Extend sed_i to raise Exceptions when the expected amount of replacements is not met + * Introduce `remote bootstrapping `__ + * Introduce `integration testing `__ (for VirtualBox and EC2) + * Merge the end-user documentation into the sphinx docs + (plugin & provider docs are now located in their respective folders as READMEs) + * Include READMEs in sphinx docs and transform their links + * Docs for integration testing + * Document the remote bootstrapping procedure + * Add documentation about the documentation + * Add list of supported builds to the docs + * Add html output to integration tests + * Implement PR #201 by @jszwedko (bump required euca2ools version) + * grub now works on jessie + * extlinux is now running on jessie + * Issue warning when specifying pre/successors across phases (but still error out if it's a conflict) + * Add salt dependencies in the right phase + * extlinux now works with GPT on HVM instances + * Take @ssgelm's advice in #155 and copy the mount table -- df warnings no more + * Generally deny installing grub on squeeze (too much of a hassle to get working, PRs welcome) + * Add 1 sector gap between partitions on GPT + * Add new task: DeterminKernelVersion, this can potentially fix a lot of small problems + * Disable getty processes on jessie through 
logind config + * Partition volumes by sectors instead of bytes + This allows for finer grained control over the partition sizes and gaps + Add new Sectors unit, enhance Bytes unit, add unit tests for both + * Don't require qemu for raw volumes, use `truncate` instead + * Fix #179: Disabling getty processes task fails half the time + * Split grub and extlinux installs into separate modules + * Fix extlinux config for squeeze + * Fix #136: Make extlinux output boot messages to the serial console + * Extend sed_i to raise Exceptions when the expected amount of replacements is not met Jonas Bergler: - * Fixes #145: Fix installation of vbox guest additions. + * Fixes #145: Fix installation of vbox guest additions. Tiago Ilieve: - * Fixes #142: msdos partition type incorrect for swap partition (Linux) + * Fixes #142: msdos partition type incorrect for swap partition (Linux) 2015-04-23 ---------- Tiago Ilieve: - * Fixes #212: Sparse file is created on the current directory + * Fixes #212: Sparse file is created on the current directory 2014-11-23 ---------- Noah Fontes: - * Add support for enhanced networking on EC2 images + * Add support for enhanced networking on EC2 images 2014-07-12 ---------- Tiago Ilieve: - * Fixes #96: AddBackports is now a common task + * Fixes #96: AddBackports is now a common task 2014-07-09 ---------- Anders Ingemann: - * Allow passing data into the manifest - * Refactor logging setup to be more modular - * Convert every JSON file to YAML - * Convert "provider" into provider specific section + * Allow passing data into the manifest + * Refactor logging setup to be more modular + * Convert every JSON file to YAML + * Convert "provider" into provider specific section 2014-07-02 ---------- Vladimir Vitkov: - * Improve grub options to work better with virtual machines + * Improve grub options to work better with virtual machines 2014-06-30 ---------- Tomasz Rybak: - * Return information about created image + * Return information about created 
image 2014-06-22 ---------- Victor Marmol: - * Enable the memory cgroup for the Docker plugin + * Enable the memory cgroup for the Docker plugin 2014-06-19 ---------- Tiago Ilieve: - * Fixes #94: allow stable/oldstable as release name on manifest + * Fixes #94: allow stable/oldstable as release name on manifest Vladimir Vitkov: - * Improve ami listing performance + * Improve ami listing performance 2014-06-07 ---------- Tiago Ilieve: - * Download `gsutil` tarball to workspace instead of working directory - * Fixes #97: remove raw disk image created by GCE after build + * Download `gsutil` tarball to workspace instead of working directory + * Fixes #97: remove raw disk image created by GCE after build 2014-06-06 ---------- Ilya Margolin: - * pip_install plugin + * pip_install plugin 2014-05-23 ---------- Tiago Ilieve: - * Fixes #95: check if the specified APT proxy server can be reached + * Fixes #95: check if the specified APT proxy server can be reached 2014-05-04 ---------- Dhananjay Balan: - * Salt minion installation & configuration plugin - * Expose debootstrap --include-packages and --exclude-packages options to manifest + * Salt minion installation & configuration plugin + * Expose debootstrap --include-packages and --exclude-packages options to manifest 2014-05-03 ---------- Anders Ingemann: - * Require hostname setting for vagrant plugin - * Fixes #14: S3 images can now be bootstrapped outside EC2. - * Added enable_agent option to puppet plugin + * Require hostname setting for vagrant plugin + * Fixes #14: S3 images can now be bootstrapped outside EC2. + * Added enable_agent option to puppet plugin 2014-05-02 ---------- Tomasz Rybak: - * Added Google Compute Engine Provider + * Added Google Compute Engine Provider diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 8e541be..5525f04 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -143,10 +143,8 @@ guidelines. There however a few exceptions: * Max line length is 110 chars, not 80. 
* Multiple assignments may be aligned with spaces so that the = match vertically. -* Ignore ``E101``: Indent with tabs and align with spaces * Ignore ``E221 & E241``: Alignment of assignments * Ignore ``E501``: The max line length is not 80 characters -* Ignore ``W191``: Indent with tabs not spaces The codebase can be checked for any violations quite easily, since those rules are already specified in the `tox `__ configuration file. diff --git a/bootstrap-vz b/bootstrap-vz index de62f32..5040baa 100755 --- a/bootstrap-vz +++ b/bootstrap-vz @@ -1,5 +1,5 @@ #!/usr/bin/env python if __name__ == '__main__': - from bootstrapvz.base.main import main - main() + from bootstrapvz.base.main import main + main() diff --git a/bootstrap-vz-remote b/bootstrap-vz-remote index d7b7254..bc432e5 100755 --- a/bootstrap-vz-remote +++ b/bootstrap-vz-remote @@ -1,5 +1,5 @@ #!/usr/bin/env python if __name__ == '__main__': - from bootstrapvz.remote.main import main - main() + from bootstrapvz.remote.main import main + main() diff --git a/bootstrap-vz-server b/bootstrap-vz-server index bf941a0..507d5b1 100755 --- a/bootstrap-vz-server +++ b/bootstrap-vz-server @@ -1,5 +1,5 @@ #!/usr/bin/env python if __name__ == '__main__': - from bootstrapvz.remote.server import main - main() + from bootstrapvz.remote.server import main + main() diff --git a/bootstrapvz/README.rst b/bootstrapvz/README.rst index 6a5003d..443643a 100644 --- a/bootstrapvz/README.rst +++ b/bootstrapvz/README.rst @@ -13,15 +13,15 @@ via attributes. Here is an example: .. 
code-block:: python - class MapPartitions(Task): - description = 'Mapping volume partitions' - phase = phases.volume_preparation - predecessors = [PartitionVolume] - successors = [filesystem.Format] + class MapPartitions(Task): + description = 'Mapping volume partitions' + phase = phases.volume_preparation + predecessors = [PartitionVolume] + successors = [filesystem.Format] - @classmethod - def run(cls, info): - info.volume.partition_map.map(info.volume) + @classmethod + def run(cls, info): + info.volume.partition_map.map(info.volume) In this case the attributes define that the task at hand should run after the ``PartitionVolume`` task — i.e. after volume has been diff --git a/bootstrapvz/base/bootstrapinfo.py b/bootstrapvz/base/bootstrapinfo.py index e2cb6e9..3f5c5d1 100644 --- a/bootstrapvz/base/bootstrapinfo.py +++ b/bootstrapvz/base/bootstrapinfo.py @@ -1,160 +1,160 @@ class BootstrapInformation(object): - """The BootstrapInformation class holds all information about the bootstrapping process. - The nature of the attributes of this class are rather diverse. - Tasks may set their own attributes on this class for later retrieval by another task. - Information that becomes invalid (e.g. a path to a file that has been deleted) must be removed. - """ - def __init__(self, manifest=None, debug=False): - """Instantiates a new bootstrap info object. + """The BootstrapInformation class holds all information about the bootstrapping process. + The nature of the attributes of this class are rather diverse. + Tasks may set their own attributes on this class for later retrieval by another task. + Information that becomes invalid (e.g. a path to a file that has been deleted) must be removed. + """ + def __init__(self, manifest=None, debug=False): + """Instantiates a new bootstrap info object. - :param Manifest manifest: The manifest - :param bool debug: Whether debugging is turned on - """ - # Set the manifest attribute. 
- self.manifest = manifest - self.debug = debug + :param Manifest manifest: The manifest + :param bool debug: Whether debugging is turned on + """ + # Set the manifest attribute. + self.manifest = manifest + self.debug = debug - # Create a run_id. This id may be used to uniquely identify the currrent bootstrapping process - import random - self.run_id = '{id:08x}'.format(id=random.randrange(16 ** 8)) + # Create a run_id. This id may be used to uniquely identify the currrent bootstrapping process + import random + self.run_id = '{id:08x}'.format(id=random.randrange(16 ** 8)) - # Define the path to our workspace - import os.path - self.workspace = os.path.join(manifest.bootstrapper['workspace'], self.run_id) + # Define the path to our workspace + import os.path + self.workspace = os.path.join(manifest.bootstrapper['workspace'], self.run_id) - # Load all the volume information - from fs import load_volume - self.volume = load_volume(self.manifest.volume, manifest.system['bootloader']) + # Load all the volume information + from fs import load_volume + self.volume = load_volume(self.manifest.volume, manifest.system['bootloader']) - # The default apt mirror - self.apt_mirror = self.manifest.packages.get('mirror', 'http://httpredir.debian.org/debian/') + # The default apt mirror + self.apt_mirror = self.manifest.packages.get('mirror', 'http://httpredir.debian.org/debian/') - # Create the manifest_vars dictionary - self.manifest_vars = self.__create_manifest_vars(self.manifest, {'apt_mirror': self.apt_mirror}) + # Create the manifest_vars dictionary + self.manifest_vars = self.__create_manifest_vars(self.manifest, {'apt_mirror': self.apt_mirror}) - # Keep a list of apt sources, - # so that tasks may add to that list without having to fiddle with apt source list files. 
- from pkg.sourceslist import SourceLists - self.source_lists = SourceLists(self.manifest_vars) - # Keep a list of apt preferences - from pkg.preferenceslist import PreferenceLists - self.preference_lists = PreferenceLists(self.manifest_vars) - # Keep a list of packages that should be installed, tasks can add and remove things from this list - from pkg.packagelist import PackageList - self.packages = PackageList(self.manifest_vars, self.source_lists) + # Keep a list of apt sources, + # so that tasks may add to that list without having to fiddle with apt source list files. + from pkg.sourceslist import SourceLists + self.source_lists = SourceLists(self.manifest_vars) + # Keep a list of apt preferences + from pkg.preferenceslist import PreferenceLists + self.preference_lists = PreferenceLists(self.manifest_vars) + # Keep a list of packages that should be installed, tasks can add and remove things from this list + from pkg.packagelist import PackageList + self.packages = PackageList(self.manifest_vars, self.source_lists) - # These sets should rarely be used and specify which packages the debootstrap invocation - # should be called with. - self.include_packages = set() - self.exclude_packages = set() + # These sets should rarely be used and specify which packages the debootstrap invocation + # should be called with. + self.include_packages = set() + self.exclude_packages = set() - # Dictionary to specify which commands are required on the host. - # The keys are commands, while the values are either package names or urls - # that hint at how a command may be made available. - self.host_dependencies = {} + # Dictionary to specify which commands are required on the host. + # The keys are commands, while the values are either package names or urls + # that hint at how a command may be made available. + self.host_dependencies = {} - # Path to optional bootstrapping script for modifying the behaviour of debootstrap - # (will be used instead of e.g. 
/usr/share/debootstrap/scripts/jessie) - self.bootstrap_script = None + # Path to optional bootstrapping script for modifying the behaviour of debootstrap + # (will be used instead of e.g. /usr/share/debootstrap/scripts/jessie) + self.bootstrap_script = None - # Lists of startup scripts that should be installed and disabled - self.initd = {'install': {}, 'disable': []} + # Lists of startup scripts that should be installed and disabled + self.initd = {'install': {}, 'disable': []} - # Add a dictionary that can be accessed via info._pluginname for the provider and every plugin - # Information specific to the module can be added to that 'namespace', this avoids clutter. - providername = manifest.modules['provider'].__name__.split('.')[-1] - setattr(self, '_' + providername, {}) - for plugin in manifest.modules['plugins']: - pluginname = plugin.__name__.split('.')[-1] - setattr(self, '_' + pluginname, {}) + # Add a dictionary that can be accessed via info._pluginname for the provider and every plugin + # Information specific to the module can be added to that 'namespace', this avoids clutter. + providername = manifest.modules['provider'].__name__.split('.')[-1] + setattr(self, '_' + providername, {}) + for plugin in manifest.modules['plugins']: + pluginname = plugin.__name__.split('.')[-1] + setattr(self, '_' + pluginname, {}) - def __create_manifest_vars(self, manifest, additional_vars={}): - """Creates the manifest variables dictionary, based on the manifest contents - and additional data. + def __create_manifest_vars(self, manifest, additional_vars={}): + """Creates the manifest variables dictionary, based on the manifest contents + and additional data. 
- :param Manifest manifest: The Manifest - :param dict additional_vars: Additional values (they will take precedence and overwrite anything else) - :return: The manifest_vars dictionary - :rtype: dict - """ + :param Manifest manifest: The Manifest + :param dict additional_vars: Additional values (they will take precedence and overwrite anything else) + :return: The manifest_vars dictionary + :rtype: dict + """ - def set_manifest_vars(obj, data): - """Runs through the manifest and creates DictClasses for every key + def set_manifest_vars(obj, data): + """Runs through the manifest and creates DictClasses for every key - :param dict obj: dictionary to set the values on - :param dict data: dictionary of values to set on the obj - """ - for key, value in data.iteritems(): - if isinstance(value, dict): - obj[key] = DictClass() - set_manifest_vars(obj[key], value) - continue - # Lists are not supported - if not isinstance(value, list): - obj[key] = value + :param dict obj: dictionary to set the values on + :param dict data: dictionary of values to set on the obj + """ + for key, value in data.iteritems(): + if isinstance(value, dict): + obj[key] = DictClass() + set_manifest_vars(obj[key], value) + continue + # Lists are not supported + if not isinstance(value, list): + obj[key] = value - # manifest_vars is a dictionary of all the manifest values, - # with it users can cross-reference values in the manifest, so that they do not need to be written twice - manifest_vars = {} - set_manifest_vars(manifest_vars, manifest.data) + # manifest_vars is a dictionary of all the manifest values, + # with it users can cross-reference values in the manifest, so that they do not need to be written twice + manifest_vars = {} + set_manifest_vars(manifest_vars, manifest.data) - # Populate the manifest_vars with datetime information - # and map the datetime variables directly to the dictionary - from datetime import datetime - now = datetime.now() - time_vars = ['%a', '%A', '%b', '%B', '%c', 
'%d', '%f', '%H', - '%I', '%j', '%m', '%M', '%p', '%S', '%U', '%w', - '%W', '%x', '%X', '%y', '%Y', '%z', '%Z'] - for key in time_vars: - manifest_vars[key] = now.strftime(key) + # Populate the manifest_vars with datetime information + # and map the datetime variables directly to the dictionary + from datetime import datetime + now = datetime.now() + time_vars = ['%a', '%A', '%b', '%B', '%c', '%d', '%f', '%H', + '%I', '%j', '%m', '%M', '%p', '%S', '%U', '%w', + '%W', '%x', '%X', '%y', '%Y', '%z', '%Z'] + for key in time_vars: + manifest_vars[key] = now.strftime(key) - # Add any additional manifest variables - # They are added last so that they may override previous variables - set_manifest_vars(manifest_vars, additional_vars) - return manifest_vars + # Add any additional manifest variables + # They are added last so that they may override previous variables + set_manifest_vars(manifest_vars, additional_vars) + return manifest_vars - def __getstate__(self): - from bootstrapvz.remote import supported_classes + def __getstate__(self): + from bootstrapvz.remote import supported_classes - def can_serialize(obj): - if hasattr(obj, '__class__') and hasattr(obj, '__module__'): - class_name = obj.__module__ + '.' + obj.__class__.__name__ - return class_name in supported_classes or isinstance(obj, (BaseException, Exception)) - return True + def can_serialize(obj): + if hasattr(obj, '__class__') and hasattr(obj, '__module__'): + class_name = obj.__module__ + '.' 
+ obj.__class__.__name__ + return class_name in supported_classes or isinstance(obj, (BaseException, Exception)) + return True - def filter_state(state): - if isinstance(state, dict): - return {key: filter_state(val) for key, val in state.items() if can_serialize(val)} - if isinstance(state, (set, tuple, list, frozenset)): - return type(state)(filter_state(val) for val in state if can_serialize(val)) - return state + def filter_state(state): + if isinstance(state, dict): + return {key: filter_state(val) for key, val in state.items() if can_serialize(val)} + if isinstance(state, (set, tuple, list, frozenset)): + return type(state)(filter_state(val) for val in state if can_serialize(val)) + return state - state = filter_state(self.__dict__) - state['__class__'] = self.__module__ + '.' + self.__class__.__name__ - return state + state = filter_state(self.__dict__) + state['__class__'] = self.__module__ + '.' + self.__class__.__name__ + return state - def __setstate__(self, state): - for key in state: - self.__dict__[key] = state[key] + def __setstate__(self, state): + for key in state: + self.__dict__[key] = state[key] class DictClass(dict): - """Tiny extension of dict to allow setting and getting keys via attributes - """ - def __getattr__(self, name): - return self[name] + """Tiny extension of dict to allow setting and getting keys via attributes + """ + def __getattr__(self, name): + return self[name] - def __setattr__(self, name, value): - self[name] = value + def __setattr__(self, name, value): + self[name] = value - def __delattr__(self, name): - del self[name] + def __delattr__(self, name): + del self[name] - def __getstate__(self): - return self.__dict__ + def __getstate__(self): + return self.__dict__ - def __setstate__(self, state): - for key in state: - self[key] = state[key] + def __setstate__(self, state): + for key in state: + self[key] = state[key] diff --git a/bootstrapvz/base/fs/__init__.py b/bootstrapvz/base/fs/__init__.py index ea744ef..ec86a5c 
100644 --- a/bootstrapvz/base/fs/__init__.py +++ b/bootstrapvz/base/fs/__init__.py @@ -1,45 +1,45 @@ def load_volume(data, bootloader): - """Instantiates a volume that corresponds to the data in the manifest + """Instantiates a volume that corresponds to the data in the manifest - :param dict data: The 'volume' section from the manifest - :param str bootloader: Name of the bootloader the system will boot with + :param dict data: The 'volume' section from the manifest + :param str bootloader: Name of the bootloader the system will boot with - :return: The volume that represents all information pertaining to the volume we bootstrap on. - :rtype: Volume - """ - # Map valid partition maps in the manifest and their corresponding classes - from partitionmaps.gpt import GPTPartitionMap - from partitionmaps.msdos import MSDOSPartitionMap - from partitionmaps.none import NoPartitions - partition_map = {'none': NoPartitions, - 'gpt': GPTPartitionMap, - 'msdos': MSDOSPartitionMap, - }.get(data['partitions']['type']) + :return: The volume that represents all information pertaining to the volume we bootstrap on. 
+ :rtype: Volume + """ + # Map valid partition maps in the manifest and their corresponding classes + from partitionmaps.gpt import GPTPartitionMap + from partitionmaps.msdos import MSDOSPartitionMap + from partitionmaps.none import NoPartitions + partition_map = {'none': NoPartitions, + 'gpt': GPTPartitionMap, + 'msdos': MSDOSPartitionMap, + }.get(data['partitions']['type']) - # Map valid volume backings in the manifest and their corresponding classes - from bootstrapvz.common.fs.loopbackvolume import LoopbackVolume - from bootstrapvz.providers.ec2.ebsvolume import EBSVolume - from bootstrapvz.common.fs.virtualdiskimage import VirtualDiskImage - from bootstrapvz.common.fs.virtualharddisk import VirtualHardDisk - from bootstrapvz.common.fs.virtualmachinedisk import VirtualMachineDisk - from bootstrapvz.common.fs.folder import Folder - volume_backing = {'raw': LoopbackVolume, - 's3': LoopbackVolume, - 'vdi': VirtualDiskImage, - 'vhd': VirtualHardDisk, - 'vmdk': VirtualMachineDisk, - 'ebs': EBSVolume, - 'folder': Folder - }.get(data['backing']) + # Map valid volume backings in the manifest and their corresponding classes + from bootstrapvz.common.fs.loopbackvolume import LoopbackVolume + from bootstrapvz.providers.ec2.ebsvolume import EBSVolume + from bootstrapvz.common.fs.virtualdiskimage import VirtualDiskImage + from bootstrapvz.common.fs.virtualharddisk import VirtualHardDisk + from bootstrapvz.common.fs.virtualmachinedisk import VirtualMachineDisk + from bootstrapvz.common.fs.folder import Folder + volume_backing = {'raw': LoopbackVolume, + 's3': LoopbackVolume, + 'vdi': VirtualDiskImage, + 'vhd': VirtualHardDisk, + 'vmdk': VirtualMachineDisk, + 'ebs': EBSVolume, + 'folder': Folder + }.get(data['backing']) - # Instantiate the partition map - from bootstrapvz.common.bytes import Bytes - # Only operate with a physical sector size of 512 bytes for now, - # not sure if we can change that for some of the virtual disks - sector_size = Bytes('512B') - partition_map = 
partition_map(data['partitions'], sector_size, bootloader) + # Instantiate the partition map + from bootstrapvz.common.bytes import Bytes + # Only operate with a physical sector size of 512 bytes for now, + # not sure if we can change that for some of the virtual disks + sector_size = Bytes('512B') + partition_map = partition_map(data['partitions'], sector_size, bootloader) - # Create the volume with the partition map as an argument - return volume_backing(partition_map) + # Create the volume with the partition map as an argument + return volume_backing(partition_map) diff --git a/bootstrapvz/base/fs/exceptions.py b/bootstrapvz/base/fs/exceptions.py index fad7868..197c525 100644 --- a/bootstrapvz/base/fs/exceptions.py +++ b/bootstrapvz/base/fs/exceptions.py @@ -1,12 +1,12 @@ class VolumeError(Exception): - """Raised when an error occurs while interacting with the volume - """ - pass + """Raised when an error occurs while interacting with the volume + """ + pass class PartitionError(Exception): - """Raised when an error occurs while interacting with the partitions on the volume - """ - pass + """Raised when an error occurs while interacting with the partitions on the volume + """ + pass diff --git a/bootstrapvz/base/fs/partitionmaps/abstract.py b/bootstrapvz/base/fs/partitionmaps/abstract.py index cde7c2f..407eecf 100644 --- a/bootstrapvz/base/fs/partitionmaps/abstract.py +++ b/bootstrapvz/base/fs/partitionmaps/abstract.py @@ -6,117 +6,117 @@ from ..exceptions import PartitionError class AbstractPartitionMap(FSMProxy): - """Abstract representation of a partiton map - This class is a finite state machine and represents the state of the real partition map - """ + """Abstract representation of a partiton map + This class is a finite state machine and represents the state of the real partition map + """ - __metaclass__ = ABCMeta + __metaclass__ = ABCMeta - # States the partition map can be in - events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'}, - 
{'name': 'map', 'src': 'unmapped', 'dst': 'mapped'}, - {'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'}, - ] + # States the partition map can be in + events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'}, + {'name': 'map', 'src': 'unmapped', 'dst': 'mapped'}, + {'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'}, + ] - def __init__(self, bootloader): - """ - :param str bootloader: Name of the bootloader we will use for bootstrapping - """ - # Create the configuration for the state machine - cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': {}} - super(AbstractPartitionMap, self).__init__(cfg) + def __init__(self, bootloader): + """ + :param str bootloader: Name of the bootloader we will use for bootstrapping + """ + # Create the configuration for the state machine + cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': {}} + super(AbstractPartitionMap, self).__init__(cfg) - def is_blocking(self): - """Returns whether the partition map is blocking volume detach operations + def is_blocking(self): + """Returns whether the partition map is blocking volume detach operations - :rtype: bool - """ - return self.fsm.current == 'mapped' + :rtype: bool + """ + return self.fsm.current == 'mapped' - def get_total_size(self): - """Returns the total size the partitions occupy + def get_total_size(self): + """Returns the total size the partitions occupy - :return: The size of all partitions - :rtype: Sectors - """ - # We just need the endpoint of the last partition - return self.partitions[-1].get_end() + :return: The size of all partitions + :rtype: Sectors + """ + # We just need the endpoint of the last partition + return self.partitions[-1].get_end() - def create(self, volume): - """Creates the partition map + def create(self, volume): + """Creates the partition map - :param Volume volume: The volume to create the partition map on - """ - self.fsm.create(volume=volume) + :param Volume volume: The volume to create the 
partition map on + """ + self.fsm.create(volume=volume) - @abstractmethod - def _before_create(self, event): - pass + @abstractmethod + def _before_create(self, event): + pass - def map(self, volume): - """Maps the partition map to device nodes + def map(self, volume): + """Maps the partition map to device nodes - :param Volume volume: The volume the partition map resides on - """ - self.fsm.map(volume=volume) + :param Volume volume: The volume the partition map resides on + """ + self.fsm.map(volume=volume) - def _before_map(self, event): - """ - :raises PartitionError: In case a partition could not be mapped. - """ - volume = event.volume - try: - # Ask kpartx how the partitions will be mapped before actually attaching them. - mappings = log_check_call(['kpartx', '-l', volume.device_path]) - import re - regexp = re.compile('^(?P.+[^\d](?P\d+)) : ' - '(?P\d) (?P\d+) ' - '{device_path} (?P\d+)$' - .format(device_path=volume.device_path)) - log_check_call(['kpartx', '-as', volume.device_path]) + def _before_map(self, event): + """ + :raises PartitionError: In case a partition could not be mapped. + """ + volume = event.volume + try: + # Ask kpartx how the partitions will be mapped before actually attaching them. 
+ mappings = log_check_call(['kpartx', '-l', volume.device_path]) + import re + regexp = re.compile('^(?P.+[^\d](?P\d+)) : ' + '(?P\d) (?P\d+) ' + '{device_path} (?P\d+)$' + .format(device_path=volume.device_path)) + log_check_call(['kpartx', '-as', volume.device_path]) - import os.path - # Run through the kpartx output and map the paths to the partitions - for mapping in mappings: - match = regexp.match(mapping) - if match is None: - raise PartitionError('Unable to parse kpartx output: ' + mapping) - partition_path = os.path.join('/dev/mapper', match.group('name')) - p_idx = int(match.group('p_idx')) - 1 - self.partitions[p_idx].map(partition_path) + import os.path + # Run through the kpartx output and map the paths to the partitions + for mapping in mappings: + match = regexp.match(mapping) + if match is None: + raise PartitionError('Unable to parse kpartx output: ' + mapping) + partition_path = os.path.join('/dev/mapper', match.group('name')) + p_idx = int(match.group('p_idx')) - 1 + self.partitions[p_idx].map(partition_path) - # Check if any partition was not mapped - for idx, partition in enumerate(self.partitions): - if partition.fsm.current not in ['mapped', 'formatted']: - raise PartitionError('kpartx did not map partition #' + str(partition.get_index())) + # Check if any partition was not mapped + for idx, partition in enumerate(self.partitions): + if partition.fsm.current not in ['mapped', 'formatted']: + raise PartitionError('kpartx did not map partition #' + str(partition.get_index())) - except PartitionError: - # Revert any mapping and reraise the error - for partition in self.partitions: - if partition.fsm.can('unmap'): - partition.unmap() - log_check_call(['kpartx', '-ds', volume.device_path]) - raise + except PartitionError: + # Revert any mapping and reraise the error + for partition in self.partitions: + if partition.fsm.can('unmap'): + partition.unmap() + log_check_call(['kpartx', '-ds', volume.device_path]) + raise - def unmap(self, volume): - 
"""Unmaps the partition + def unmap(self, volume): + """Unmaps the partition - :param Volume volume: The volume to unmap the partition map from - """ - self.fsm.unmap(volume=volume) + :param Volume volume: The volume to unmap the partition map from + """ + self.fsm.unmap(volume=volume) - def _before_unmap(self, event): - """ - :raises PartitionError: If the a partition cannot be unmapped - """ - volume = event.volume - # Run through all partitions before unmapping and make sure they can all be unmapped - for partition in self.partitions: - if partition.fsm.cannot('unmap'): - msg = 'The partition {partition} prevents the unmap procedure'.format(partition=partition) - raise PartitionError(msg) - # Actually unmap the partitions - log_check_call(['kpartx', '-ds', volume.device_path]) - # Call unmap on all partitions - for partition in self.partitions: - partition.unmap() + def _before_unmap(self, event): + """ + :raises PartitionError: If the a partition cannot be unmapped + """ + volume = event.volume + # Run through all partitions before unmapping and make sure they can all be unmapped + for partition in self.partitions: + if partition.fsm.cannot('unmap'): + msg = 'The partition {partition} prevents the unmap procedure'.format(partition=partition) + raise PartitionError(msg) + # Actually unmap the partitions + log_check_call(['kpartx', '-ds', volume.device_path]) + # Call unmap on all partitions + for partition in self.partitions: + partition.unmap() diff --git a/bootstrapvz/base/fs/partitionmaps/gpt.py b/bootstrapvz/base/fs/partitionmaps/gpt.py index 44f8385..c515715 100644 --- a/bootstrapvz/base/fs/partitionmaps/gpt.py +++ b/bootstrapvz/base/fs/partitionmaps/gpt.py @@ -5,92 +5,92 @@ from bootstrapvz.common.tools import log_check_call class GPTPartitionMap(AbstractPartitionMap): - """Represents a GPT partition map - """ + """Represents a GPT partition map + """ - def __init__(self, data, sector_size, bootloader): - """ - :param dict data: volume.partitions part of 
the manifest - :param int sector_size: Sectorsize of the volume - :param str bootloader: Name of the bootloader we will use for bootstrapping - """ - from bootstrapvz.common.sectors import Sectors + def __init__(self, data, sector_size, bootloader): + """ + :param dict data: volume.partitions part of the manifest + :param int sector_size: Sectorsize of the volume + :param str bootloader: Name of the bootloader we will use for bootstrapping + """ + from bootstrapvz.common.sectors import Sectors - # List of partitions - self.partitions = [] + # List of partitions + self.partitions = [] - # Returns the last partition unless there is none - def last_partition(): - return self.partitions[-1] if len(self.partitions) > 0 else None + # Returns the last partition unless there is none + def last_partition(): + return self.partitions[-1] if len(self.partitions) > 0 else None - if bootloader == 'grub': - # If we are using the grub bootloader we need to create an unformatted partition - # at the beginning of the map. Its size is 1007kb, which seems to be chosen so that - # primary gpt + grub = 1024KiB - # The 1 MiB will be subtracted later on, once we know what the subsequent partition is - from ..partitions.unformatted import UnformattedPartition - self.grub_boot = UnformattedPartition(Sectors('1MiB', sector_size), last_partition()) - self.partitions.append(self.grub_boot) + if bootloader == 'grub': + # If we are using the grub bootloader we need to create an unformatted partition + # at the beginning of the map. Its size is 1007kb, which seems to be chosen so that + # primary gpt + grub = 1024KiB + # The 1 MiB will be subtracted later on, once we know what the subsequent partition is + from ..partitions.unformatted import UnformattedPartition + self.grub_boot = UnformattedPartition(Sectors('1MiB', sector_size), last_partition()) + self.partitions.append(self.grub_boot) - # Offset all partitions by 1 sector. 
- # parted in jessie has changed and no longer allows - # partitions to be right next to each other. - partition_gap = Sectors(1, sector_size) + # Offset all partitions by 1 sector. + # parted in jessie has changed and no longer allows + # partitions to be right next to each other. + partition_gap = Sectors(1, sector_size) - # The boot and swap partitions are optional - if 'boot' in data: - self.boot = GPTPartition(Sectors(data['boot']['size'], sector_size), - data['boot']['filesystem'], data['boot'].get('format_command', None), - 'boot', last_partition()) - if self.boot.previous is not None: - # No need to pad if this is the first partition - self.boot.pad_start += partition_gap - self.boot.size -= partition_gap - self.partitions.append(self.boot) + # The boot and swap partitions are optional + if 'boot' in data: + self.boot = GPTPartition(Sectors(data['boot']['size'], sector_size), + data['boot']['filesystem'], data['boot'].get('format_command', None), + 'boot', last_partition()) + if self.boot.previous is not None: + # No need to pad if this is the first partition + self.boot.pad_start += partition_gap + self.boot.size -= partition_gap + self.partitions.append(self.boot) - if 'swap' in data: - self.swap = GPTSwapPartition(Sectors(data['swap']['size'], sector_size), last_partition()) - if self.swap.previous is not None: - self.swap.pad_start += partition_gap - self.swap.size -= partition_gap - self.partitions.append(self.swap) + if 'swap' in data: + self.swap = GPTSwapPartition(Sectors(data['swap']['size'], sector_size), last_partition()) + if self.swap.previous is not None: + self.swap.pad_start += partition_gap + self.swap.size -= partition_gap + self.partitions.append(self.swap) - self.root = GPTPartition(Sectors(data['root']['size'], sector_size), - data['root']['filesystem'], data['root'].get('format_command', None), - 'root', last_partition()) - if self.root.previous is not None: - self.root.pad_start += partition_gap - self.root.size -= partition_gap - 
self.partitions.append(self.root) + self.root = GPTPartition(Sectors(data['root']['size'], sector_size), + data['root']['filesystem'], data['root'].get('format_command', None), + 'root', last_partition()) + if self.root.previous is not None: + self.root.pad_start += partition_gap + self.root.size -= partition_gap + self.partitions.append(self.root) - if hasattr(self, 'grub_boot'): - # Mark the grub partition as a bios_grub partition - self.grub_boot.flags.append('bios_grub') - # Subtract the grub partition size from the subsequent partition - self.partitions[1].size -= self.grub_boot.size - else: - # Not using grub, mark the boot partition or root as bootable - getattr(self, 'boot', self.root).flags.append('legacy_boot') + if hasattr(self, 'grub_boot'): + # Mark the grub partition as a bios_grub partition + self.grub_boot.flags.append('bios_grub') + # Subtract the grub partition size from the subsequent partition + self.partitions[1].size -= self.grub_boot.size + else: + # Not using grub, mark the boot partition or root as bootable + getattr(self, 'boot', self.root).flags.append('legacy_boot') - # The first and last 34 sectors are reserved for the primary/secondary GPT - primary_gpt_size = Sectors(34, sector_size) - self.partitions[0].pad_start += primary_gpt_size - self.partitions[0].size -= primary_gpt_size + # The first and last 34 sectors are reserved for the primary/secondary GPT + primary_gpt_size = Sectors(34, sector_size) + self.partitions[0].pad_start += primary_gpt_size + self.partitions[0].size -= primary_gpt_size - secondary_gpt_size = Sectors(34, sector_size) - self.partitions[-1].pad_end += secondary_gpt_size - self.partitions[-1].size -= secondary_gpt_size + secondary_gpt_size = Sectors(34, sector_size) + self.partitions[-1].pad_end += secondary_gpt_size + self.partitions[-1].size -= secondary_gpt_size - super(GPTPartitionMap, self).__init__(bootloader) + super(GPTPartitionMap, self).__init__(bootloader) - def _before_create(self, event): - 
"""Creates the partition map - """ - volume = event.volume - # Disk alignment still plays a role in virtualized environment, - # but I honestly have no clue as to what best practice is here, so we choose 'none' - log_check_call(['parted', '--script', '--align', 'none', volume.device_path, - '--', 'mklabel', 'gpt']) - # Create the partitions - for partition in self.partitions: - partition.create(volume) + def _before_create(self, event): + """Creates the partition map + """ + volume = event.volume + # Disk alignment still plays a role in virtualized environment, + # but I honestly have no clue as to what best practice is here, so we choose 'none' + log_check_call(['parted', '--script', '--align', 'none', volume.device_path, + '--', 'mklabel', 'gpt']) + # Create the partitions + for partition in self.partitions: + partition.create(volume) diff --git a/bootstrapvz/base/fs/partitionmaps/msdos.py b/bootstrapvz/base/fs/partitionmaps/msdos.py index 6c0d25d..22e53fd 100644 --- a/bootstrapvz/base/fs/partitionmaps/msdos.py +++ b/bootstrapvz/base/fs/partitionmaps/msdos.py @@ -5,82 +5,82 @@ from bootstrapvz.common.tools import log_check_call class MSDOSPartitionMap(AbstractPartitionMap): - """Represents a MS-DOS partition map - Sometimes also called MBR (but that confuses the hell out of me, so ms-dos it is) - """ + """Represents a MS-DOS partition map + Sometimes also called MBR (but that confuses the hell out of me, so ms-dos it is) + """ - def __init__(self, data, sector_size, bootloader): - """ - :param dict data: volume.partitions part of the manifest - :param int sector_size: Sectorsize of the volume - :param str bootloader: Name of the bootloader we will use for bootstrapping - """ - from bootstrapvz.common.sectors import Sectors + def __init__(self, data, sector_size, bootloader): + """ + :param dict data: volume.partitions part of the manifest + :param int sector_size: Sectorsize of the volume + :param str bootloader: Name of the bootloader we will use for 
bootstrapping + """ + from bootstrapvz.common.sectors import Sectors - # List of partitions - self.partitions = [] + # List of partitions + self.partitions = [] - # Returns the last partition unless there is none - def last_partition(): - return self.partitions[-1] if len(self.partitions) > 0 else None + # Returns the last partition unless there is none + def last_partition(): + return self.partitions[-1] if len(self.partitions) > 0 else None - # The boot and swap partitions are optional - if 'boot' in data: - self.boot = MSDOSPartition(Sectors(data['boot']['size'], sector_size), - data['boot']['filesystem'], data['boot'].get('format_command', None), - last_partition()) - self.partitions.append(self.boot) + # The boot and swap partitions are optional + if 'boot' in data: + self.boot = MSDOSPartition(Sectors(data['boot']['size'], sector_size), + data['boot']['filesystem'], data['boot'].get('format_command', None), + last_partition()) + self.partitions.append(self.boot) - # Offset all partitions by 1 sector. - # parted in jessie has changed and no longer allows - # partitions to be right next to each other. - partition_gap = Sectors(1, sector_size) + # Offset all partitions by 1 sector. + # parted in jessie has changed and no longer allows + # partitions to be right next to each other. 
+ partition_gap = Sectors(1, sector_size) - if 'swap' in data: - self.swap = MSDOSSwapPartition(Sectors(data['swap']['size'], sector_size), last_partition()) - if self.swap.previous is not None: - # No need to pad if this is the first partition - self.swap.pad_start += partition_gap - self.swap.size -= partition_gap - self.partitions.append(self.swap) + if 'swap' in data: + self.swap = MSDOSSwapPartition(Sectors(data['swap']['size'], sector_size), last_partition()) + if self.swap.previous is not None: + # No need to pad if this is the first partition + self.swap.pad_start += partition_gap + self.swap.size -= partition_gap + self.partitions.append(self.swap) - self.root = MSDOSPartition(Sectors(data['root']['size'], sector_size), - data['root']['filesystem'], data['root'].get('format_command', None), - last_partition()) - if self.root.previous is not None: - self.root.pad_start += partition_gap - self.root.size -= partition_gap - self.partitions.append(self.root) + self.root = MSDOSPartition(Sectors(data['root']['size'], sector_size), + data['root']['filesystem'], data['root'].get('format_command', None), + last_partition()) + if self.root.previous is not None: + self.root.pad_start += partition_gap + self.root.size -= partition_gap + self.partitions.append(self.root) - # Mark boot as the boot partition, or root, if boot does not exist - getattr(self, 'boot', self.root).flags.append('boot') + # Mark boot as the boot partition, or root, if boot does not exist + getattr(self, 'boot', self.root).flags.append('boot') - # If we are using the grub bootloader, we will need to add a 2 MB offset - # at the beginning of the partitionmap and steal it from the first partition. - # The MBR offset is included in the grub offset, so if we don't use grub - # we should reduce the size of the first partition and move it by only 512 bytes. 
- if bootloader == 'grub': - mbr_offset = Sectors('2MiB', sector_size) - else: - mbr_offset = Sectors('512B', sector_size) + # If we are using the grub bootloader, we will need to add a 2 MB offset + # at the beginning of the partitionmap and steal it from the first partition. + # The MBR offset is included in the grub offset, so if we don't use grub + # we should reduce the size of the first partition and move it by only 512 bytes. + if bootloader == 'grub': + mbr_offset = Sectors('2MiB', sector_size) + else: + mbr_offset = Sectors('512B', sector_size) - self.partitions[0].pad_start += mbr_offset - self.partitions[0].size -= mbr_offset + self.partitions[0].pad_start += mbr_offset + self.partitions[0].size -= mbr_offset - # Leave the last sector unformatted - # parted in jessie thinks that a partition 10 sectors in size - # goes from sector 0 to sector 9 (instead of 0 to 10) - self.partitions[-1].pad_end += 1 - self.partitions[-1].size -= 1 + # Leave the last sector unformatted + # parted in jessie thinks that a partition 10 sectors in size + # goes from sector 0 to sector 9 (instead of 0 to 10) + self.partitions[-1].pad_end += 1 + self.partitions[-1].size -= 1 - super(MSDOSPartitionMap, self).__init__(bootloader) + super(MSDOSPartitionMap, self).__init__(bootloader) - def _before_create(self, event): - volume = event.volume - # Disk alignment still plays a role in virtualized environment, - # but I honestly have no clue as to what best practice is here, so we choose 'none' - log_check_call(['parted', '--script', '--align', 'none', volume.device_path, - '--', 'mklabel', 'msdos']) - # Create the partitions - for partition in self.partitions: - partition.create(volume) + def _before_create(self, event): + volume = event.volume + # Disk alignment still plays a role in virtualized environment, + # but I honestly have no clue as to what best practice is here, so we choose 'none' + log_check_call(['parted', '--script', '--align', 'none', volume.device_path, + '--', 
'mklabel', 'msdos']) + # Create the partitions + for partition in self.partitions: + partition.create(volume) diff --git a/bootstrapvz/base/fs/partitionmaps/none.py b/bootstrapvz/base/fs/partitionmaps/none.py index 944f8a5..e73495b 100644 --- a/bootstrapvz/base/fs/partitionmaps/none.py +++ b/bootstrapvz/base/fs/partitionmaps/none.py @@ -2,44 +2,44 @@ from ..partitions.single import SinglePartition class NoPartitions(object): - """Represents a virtual 'NoPartitions' partitionmap. - This virtual partition map exists because it is easier for tasks to - simply always deal with partition maps and then let the base abstract that away. - """ + """Represents a virtual 'NoPartitions' partitionmap. + This virtual partition map exists because it is easier for tasks to + simply always deal with partition maps and then let the base abstract that away. + """ - def __init__(self, data, sector_size, bootloader): - """ - :param dict data: volume.partitions part of the manifest - :param int sector_size: Sectorsize of the volume - :param str bootloader: Name of the bootloader we will use for bootstrapping - """ - from bootstrapvz.common.sectors import Sectors + def __init__(self, data, sector_size, bootloader): + """ + :param dict data: volume.partitions part of the manifest + :param int sector_size: Sectorsize of the volume + :param str bootloader: Name of the bootloader we will use for bootstrapping + """ + from bootstrapvz.common.sectors import Sectors - # In the NoPartitions partitions map we only have a single 'partition' - self.root = SinglePartition(Sectors(data['root']['size'], sector_size), - data['root']['filesystem'], data['root'].get('format_command', None)) - self.partitions = [self.root] + # In the NoPartitions partitions map we only have a single 'partition' + self.root = SinglePartition(Sectors(data['root']['size'], sector_size), + data['root']['filesystem'], data['root'].get('format_command', None)) + self.partitions = [self.root] - def is_blocking(self): - 
"""Returns whether the partition map is blocking volume detach operations + def is_blocking(self): + """Returns whether the partition map is blocking volume detach operations - :rtype: bool - """ - return self.root.fsm.current == 'mounted' + :rtype: bool + """ + return self.root.fsm.current == 'mounted' - def get_total_size(self): - """Returns the total size the partitions occupy + def get_total_size(self): + """Returns the total size the partitions occupy - :return: The size of all the partitions - :rtype: Sectors - """ - return self.root.get_end() + :return: The size of all the partitions + :rtype: Sectors + """ + return self.root.get_end() - def __getstate__(self): - state = self.__dict__.copy() - state['__class__'] = self.__module__ + '.' + self.__class__.__name__ - return state + def __getstate__(self): + state = self.__dict__.copy() + state['__class__'] = self.__module__ + '.' + self.__class__.__name__ + return state - def __setstate__(self, state): - for key in state: - self.__dict__[key] = state[key] + def __setstate__(self, state): + for key in state: + self.__dict__[key] = state[key] diff --git a/bootstrapvz/base/fs/partitions/abstract.py b/bootstrapvz/base/fs/partitions/abstract.py index 9d481d7..d2f18cc 100644 --- a/bootstrapvz/base/fs/partitions/abstract.py +++ b/bootstrapvz/base/fs/partitions/abstract.py @@ -6,124 +6,124 @@ from bootstrapvz.common.fsm_proxy import FSMProxy class AbstractPartition(FSMProxy): - """Abstract representation of a partiton - This class is a finite state machine and represents the state of the real partition - """ + """Abstract representation of a partiton + This class is a finite state machine and represents the state of the real partition + """ - __metaclass__ = ABCMeta + __metaclass__ = ABCMeta - # Our states - events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'created'}, - {'name': 'format', 'src': 'created', 'dst': 'formatted'}, - {'name': 'mount', 'src': 'formatted', 'dst': 'mounted'}, - {'name': 'unmount', 
'src': 'mounted', 'dst': 'formatted'}, - ] + # Our states + events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'created'}, + {'name': 'format', 'src': 'created', 'dst': 'formatted'}, + {'name': 'mount', 'src': 'formatted', 'dst': 'mounted'}, + {'name': 'unmount', 'src': 'mounted', 'dst': 'formatted'}, + ] - def __init__(self, size, filesystem, format_command): - """ - :param Bytes size: Size of the partition - :param str filesystem: Filesystem the partition should be formatted with - :param list format_command: Optional format command, valid variables are fs, device_path and size - """ - self.size = size - self.filesystem = filesystem - self.format_command = format_command - # Initialize the start & end padding to 0 sectors, may be changed later - self.pad_start = Sectors(0, size.sector_size) - self.pad_end = Sectors(0, size.sector_size) - # Path to the partition - self.device_path = None - # Dictionary with mount points as keys and Mount objects as values - self.mounts = {} + def __init__(self, size, filesystem, format_command): + """ + :param Bytes size: Size of the partition + :param str filesystem: Filesystem the partition should be formatted with + :param list format_command: Optional format command, valid variables are fs, device_path and size + """ + self.size = size + self.filesystem = filesystem + self.format_command = format_command + # Initialize the start & end padding to 0 sectors, may be changed later + self.pad_start = Sectors(0, size.sector_size) + self.pad_end = Sectors(0, size.sector_size) + # Path to the partition + self.device_path = None + # Dictionary with mount points as keys and Mount objects as values + self.mounts = {} - # Create the configuration for our state machine - cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': {}} - super(AbstractPartition, self).__init__(cfg) + # Create the configuration for our state machine + cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': {}} + 
super(AbstractPartition, self).__init__(cfg) - def get_uuid(self): - """Gets the UUID of the partition + def get_uuid(self): + """Gets the UUID of the partition - :return: The UUID of the partition - :rtype: str - """ - [uuid] = log_check_call(['blkid', '-s', 'UUID', '-o', 'value', self.device_path]) - return uuid + :return: The UUID of the partition + :rtype: str + """ + [uuid] = log_check_call(['blkid', '-s', 'UUID', '-o', 'value', self.device_path]) + return uuid - @abstractmethod - def get_start(self): - pass + @abstractmethod + def get_start(self): + pass - def get_end(self): - """Gets the end of the partition + def get_end(self): + """Gets the end of the partition - :return: The end of the partition - :rtype: Sectors - """ - return self.get_start() + self.pad_start + self.size + self.pad_end + :return: The end of the partition + :rtype: Sectors + """ + return self.get_start() + self.pad_start + self.size + self.pad_end - def _before_format(self, e): - """Formats the partition - """ - # If there is no explicit format_command define we simply call mkfs.fstype - if self.format_command is None: - format_command = ['mkfs.{fs}', '{device_path}'] - else: - format_command = self.format_command - variables = {'fs': self.filesystem, - 'device_path': self.device_path, - 'size': self.size, - } - command = map(lambda part: part.format(**variables), format_command) - # Format the partition - log_check_call(command) + def _before_format(self, e): + """Formats the partition + """ + # If there is no explicit format_command define we simply call mkfs.fstype + if self.format_command is None: + format_command = ['mkfs.{fs}', '{device_path}'] + else: + format_command = self.format_command + variables = {'fs': self.filesystem, + 'device_path': self.device_path, + 'size': self.size, + } + command = map(lambda part: part.format(**variables), format_command) + # Format the partition + log_check_call(command) - def _before_mount(self, e): - """Mount the partition - """ - 
log_check_call(['mount', '--types', self.filesystem, self.device_path, e.destination]) - self.mount_dir = e.destination + def _before_mount(self, e): + """Mount the partition + """ + log_check_call(['mount', '--types', self.filesystem, self.device_path, e.destination]) + self.mount_dir = e.destination - def _after_mount(self, e): - """Mount any mounts associated with this partition - """ - # Make sure we mount in ascending order of mountpoint path length - # This ensures that we don't mount /dev/pts before we mount /dev - for destination in sorted(self.mounts.iterkeys(), key=len): - self.mounts[destination].mount(self.mount_dir) + def _after_mount(self, e): + """Mount any mounts associated with this partition + """ + # Make sure we mount in ascending order of mountpoint path length + # This ensures that we don't mount /dev/pts before we mount /dev + for destination in sorted(self.mounts.iterkeys(), key=len): + self.mounts[destination].mount(self.mount_dir) - def _before_unmount(self, e): - """Unmount any mounts associated with this partition - """ - # Unmount the mounts in descending order of mounpoint path length - # You cannot unmount /dev before you have unmounted /dev/pts - for destination in sorted(self.mounts.iterkeys(), key=len, reverse=True): - self.mounts[destination].unmount() - log_check_call(['umount', self.mount_dir]) - del self.mount_dir + def _before_unmount(self, e): + """Unmount any mounts associated with this partition + """ + # Unmount the mounts in descending order of mounpoint path length + # You cannot unmount /dev before you have unmounted /dev/pts + for destination in sorted(self.mounts.iterkeys(), key=len, reverse=True): + self.mounts[destination].unmount() + log_check_call(['umount', self.mount_dir]) + del self.mount_dir - def add_mount(self, source, destination, opts=[]): - """Associate a mount with this partition - Automatically mounts it + def add_mount(self, source, destination, opts=[]): + """Associate a mount with this partition + 
Automatically mounts it - :param str,AbstractPartition source: The source of the mount - :param str destination: The path to the mountpoint - :param list opts: Any options that should be passed to the mount command - """ - # Create a new mount object, mount it if the partition is mounted and put it in the mounts dict - from mount import Mount - mount = Mount(source, destination, opts) - if self.fsm.current == 'mounted': - mount.mount(self.mount_dir) - self.mounts[destination] = mount + :param str,AbstractPartition source: The source of the mount + :param str destination: The path to the mountpoint + :param list opts: Any options that should be passed to the mount command + """ + # Create a new mount object, mount it if the partition is mounted and put it in the mounts dict + from mount import Mount + mount = Mount(source, destination, opts) + if self.fsm.current == 'mounted': + mount.mount(self.mount_dir) + self.mounts[destination] = mount - def remove_mount(self, destination): - """Remove a mount from this partition - Automatically unmounts it + def remove_mount(self, destination): + """Remove a mount from this partition + Automatically unmounts it - :param str destination: The mountpoint path of the mount that should be removed - """ - # Unmount the mount if the partition is mounted and delete it from the mounts dict - # If the mount is already unmounted and the source is a partition, this will raise an exception - if self.fsm.current == 'mounted': - self.mounts[destination].unmount() - del self.mounts[destination] + :param str destination: The mountpoint path of the mount that should be removed + """ + # Unmount the mount if the partition is mounted and delete it from the mounts dict + # If the mount is already unmounted and the source is a partition, this will raise an exception + if self.fsm.current == 'mounted': + self.mounts[destination].unmount() + del self.mounts[destination] diff --git a/bootstrapvz/base/fs/partitions/base.py 
b/bootstrapvz/base/fs/partitions/base.py index df60712..00d0063 100644 --- a/bootstrapvz/base/fs/partitions/base.py +++ b/bootstrapvz/base/fs/partitions/base.py @@ -4,135 +4,135 @@ from bootstrapvz.common.sectors import Sectors class BasePartition(AbstractPartition): - """Represents a partition that is actually a partition (and not a virtual one like 'Single') - """ + """Represents a partition that is actually a partition (and not a virtual one like 'Single') + """ - # Override the states of the abstract partition - # A real partition can be mapped and unmapped - events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'}, - {'name': 'map', 'src': 'unmapped', 'dst': 'mapped'}, - {'name': 'format', 'src': 'mapped', 'dst': 'formatted'}, - {'name': 'mount', 'src': 'formatted', 'dst': 'mounted'}, - {'name': 'unmount', 'src': 'mounted', 'dst': 'formatted'}, - {'name': 'unmap', 'src': 'formatted', 'dst': 'unmapped_fmt'}, + # Override the states of the abstract partition + # A real partition can be mapped and unmapped + events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'}, + {'name': 'map', 'src': 'unmapped', 'dst': 'mapped'}, + {'name': 'format', 'src': 'mapped', 'dst': 'formatted'}, + {'name': 'mount', 'src': 'formatted', 'dst': 'mounted'}, + {'name': 'unmount', 'src': 'mounted', 'dst': 'formatted'}, + {'name': 'unmap', 'src': 'formatted', 'dst': 'unmapped_fmt'}, - {'name': 'map', 'src': 'unmapped_fmt', 'dst': 'formatted'}, - {'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'}, - ] + {'name': 'map', 'src': 'unmapped_fmt', 'dst': 'formatted'}, + {'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'}, + ] - def __init__(self, size, filesystem, format_command, previous): - """ - :param Bytes size: Size of the partition - :param str filesystem: Filesystem the partition should be formatted with - :param list format_command: Optional format command, valid variables are fs, device_path and size - :param BasePartition previous: The partition that 
preceeds this one - """ - # By saving the previous partition we have a linked list - # that partitions can go backwards in to find the first partition. - self.previous = previous - # List of flags that parted should put on the partition - self.flags = [] - # Path to symlink in /dev/disk/by-uuid (manually maintained by this class) - self.disk_by_uuid_path = None - super(BasePartition, self).__init__(size, filesystem, format_command) + def __init__(self, size, filesystem, format_command, previous): + """ + :param Bytes size: Size of the partition + :param str filesystem: Filesystem the partition should be formatted with + :param list format_command: Optional format command, valid variables are fs, device_path and size + :param BasePartition previous: The partition that preceeds this one + """ + # By saving the previous partition we have a linked list + # that partitions can go backwards in to find the first partition. + self.previous = previous + # List of flags that parted should put on the partition + self.flags = [] + # Path to symlink in /dev/disk/by-uuid (manually maintained by this class) + self.disk_by_uuid_path = None + super(BasePartition, self).__init__(size, filesystem, format_command) - def create(self, volume): - """Creates the partition + def create(self, volume): + """Creates the partition - :param Volume volume: The volume to create the partition on - """ - self.fsm.create(volume=volume) + :param Volume volume: The volume to create the partition on + """ + self.fsm.create(volume=volume) - def get_index(self): - """Gets the index of this partition in the partition map + def get_index(self): + """Gets the index of this partition in the partition map - :return: The index of the partition in the partition map - :rtype: int - """ - if self.previous is None: - # Partitions are 1 indexed - return 1 - else: - # Recursive call to the previous partition, walking up the chain... 
- return self.previous.get_index() + 1 + :return: The index of the partition in the partition map + :rtype: int + """ + if self.previous is None: + # Partitions are 1 indexed + return 1 + else: + # Recursive call to the previous partition, walking up the chain... + return self.previous.get_index() + 1 - def get_start(self): - """Gets the starting byte of this partition + def get_start(self): + """Gets the starting byte of this partition - :return: The starting byte of this partition - :rtype: Sectors - """ - if self.previous is None: - return Sectors(0, self.size.sector_size) - else: - return self.previous.get_end() + :return: The starting byte of this partition + :rtype: Sectors + """ + if self.previous is None: + return Sectors(0, self.size.sector_size) + else: + return self.previous.get_end() - def map(self, device_path): - """Maps the partition to a device_path + def map(self, device_path): + """Maps the partition to a device_path - :param str device_path: The device path this partition should be mapped to - """ - self.fsm.map(device_path=device_path) + :param str device_path: The device path this partition should be mapped to + """ + self.fsm.map(device_path=device_path) - def link_uuid(self): - # /lib/udev/rules.d/60-kpartx.rules does not create symlinks in /dev/disk/by-{uuid,label} - # This patch would fix that: http://www.redhat.com/archives/dm-devel/2013-July/msg00080.html - # For now we just do the uuid part ourselves. - # This is mainly to fix a problem in update-grub where /etc/grub.d/10_linux - # checks if the $GRUB_DEVICE_UUID exists in /dev/disk/by-uuid and falls - # back to $GRUB_DEVICE if it doesn't. - # $GRUB_DEVICE is /dev/mapper/xvd{f,g...}# (on ec2), opposed to /dev/xvda# when booting. 
- # Creating the symlink ensures that grub consistently uses - # $GRUB_DEVICE_UUID when creating /boot/grub/grub.cfg - self.disk_by_uuid_path = os.path.join('/dev/disk/by-uuid', self.get_uuid()) - if not os.path.exists(self.disk_by_uuid_path): - os.symlink(self.device_path, self.disk_by_uuid_path) + def link_uuid(self): + # /lib/udev/rules.d/60-kpartx.rules does not create symlinks in /dev/disk/by-{uuid,label} + # This patch would fix that: http://www.redhat.com/archives/dm-devel/2013-July/msg00080.html + # For now we just do the uuid part ourselves. + # This is mainly to fix a problem in update-grub where /etc/grub.d/10_linux + # checks if the $GRUB_DEVICE_UUID exists in /dev/disk/by-uuid and falls + # back to $GRUB_DEVICE if it doesn't. + # $GRUB_DEVICE is /dev/mapper/xvd{f,g...}# (on ec2), opposed to /dev/xvda# when booting. + # Creating the symlink ensures that grub consistently uses + # $GRUB_DEVICE_UUID when creating /boot/grub/grub.cfg + self.disk_by_uuid_path = os.path.join('/dev/disk/by-uuid', self.get_uuid()) + if not os.path.exists(self.disk_by_uuid_path): + os.symlink(self.device_path, self.disk_by_uuid_path) - def unlink_uuid(self): - if os.path.isfile(self.disk_by_uuid_path): - os.remove(self.disk_by_uuid_path) - self.disk_by_uuid_path = None + def unlink_uuid(self): + if os.path.isfile(self.disk_by_uuid_path): + os.remove(self.disk_by_uuid_path) + self.disk_by_uuid_path = None - def _before_create(self, e): - """Creates the partition - """ - from bootstrapvz.common.tools import log_check_call - # The create command is fairly simple: - # - fs_type is the partition filesystem, as defined by parted: - # fs-type can be one of "fat16", "fat32", "ext2", "HFS", "linux-swap", - # "NTFS", "reiserfs", or "ufs". 
- # - start and end are just Bytes objects coerced into strings - if self.filesystem == 'swap': - fs_type = 'linux-swap' - else: - fs_type = 'ext2' - create_command = ('mkpart primary {fs_type} {start} {end}' - .format(fs_type=fs_type, - start=str(self.get_start() + self.pad_start), - end=str(self.get_end() - self.pad_end))) - # Create the partition - log_check_call(['parted', '--script', '--align', 'none', e.volume.device_path, - '--', create_command]) + def _before_create(self, e): + """Creates the partition + """ + from bootstrapvz.common.tools import log_check_call + # The create command is fairly simple: + # - fs_type is the partition filesystem, as defined by parted: + # fs-type can be one of "fat16", "fat32", "ext2", "HFS", "linux-swap", + # "NTFS", "reiserfs", or "ufs". + # - start and end are just Bytes objects coerced into strings + if self.filesystem == 'swap': + fs_type = 'linux-swap' + else: + fs_type = 'ext2' + create_command = ('mkpart primary {fs_type} {start} {end}' + .format(fs_type=fs_type, + start=str(self.get_start() + self.pad_start), + end=str(self.get_end() - self.pad_end))) + # Create the partition + log_check_call(['parted', '--script', '--align', 'none', e.volume.device_path, + '--', create_command]) - # Set any flags on the partition - for flag in self.flags: - log_check_call(['parted', '--script', e.volume.device_path, - '--', ('set {idx} {flag} on' - .format(idx=str(self.get_index()), flag=flag))]) + # Set any flags on the partition + for flag in self.flags: + log_check_call(['parted', '--script', e.volume.device_path, + '--', ('set {idx} {flag} on' + .format(idx=str(self.get_index()), flag=flag))]) - def _before_map(self, e): - # Set the device path - self.device_path = e.device_path - if e.src == 'unmapped_fmt': - # Only link the uuid if the partition is formatted - self.link_uuid() + def _before_map(self, e): + # Set the device path + self.device_path = e.device_path + if e.src == 'unmapped_fmt': + # Only link the uuid if the 
partition is formatted + self.link_uuid() - def _after_format(self, e): - # We do this after formatting because there otherwise would be no UUID - self.link_uuid() + def _after_format(self, e): + # We do this after formatting because there otherwise would be no UUID + self.link_uuid() - def _before_unmap(self, e): - # When unmapped, the device_path information becomes invalid, so we delete it - self.device_path = None - if e.src == 'formatted': - self.unlink_uuid() + def _before_unmap(self, e): + # When unmapped, the device_path information becomes invalid, so we delete it + self.device_path = None + if e.src == 'formatted': + self.unlink_uuid() diff --git a/bootstrapvz/base/fs/partitions/gpt.py b/bootstrapvz/base/fs/partitions/gpt.py index df7ff6f..c8b1f31 100644 --- a/bootstrapvz/base/fs/partitions/gpt.py +++ b/bootstrapvz/base/fs/partitions/gpt.py @@ -3,24 +3,24 @@ from base import BasePartition class GPTPartition(BasePartition): - """Represents a GPT partition - """ + """Represents a GPT partition + """ - def __init__(self, size, filesystem, format_command, name, previous): - """ - :param Bytes size: Size of the partition - :param str filesystem: Filesystem the partition should be formatted with - :param list format_command: Optional format command, valid variables are fs, device_path and size - :param str name: The name of the partition - :param BasePartition previous: The partition that preceeds this one - """ - self.name = name - super(GPTPartition, self).__init__(size, filesystem, format_command, previous) + def __init__(self, size, filesystem, format_command, name, previous): + """ + :param Bytes size: Size of the partition + :param str filesystem: Filesystem the partition should be formatted with + :param list format_command: Optional format command, valid variables are fs, device_path and size + :param str name: The name of the partition + :param BasePartition previous: The partition that preceeds this one + """ + self.name = name + super(GPTPartition, 
self).__init__(size, filesystem, format_command, previous) - def _before_create(self, e): - # Create the partition and then set the name of the partition afterwards - super(GPTPartition, self)._before_create(e) - # partition name only works for gpt, for msdos that becomes the part-type (primary, extended, logical) - name_command = 'name {idx} {name}'.format(idx=self.get_index(), name=self.name) - log_check_call(['parted', '--script', e.volume.device_path, - '--', name_command]) + def _before_create(self, e): + # Create the partition and then set the name of the partition afterwards + super(GPTPartition, self)._before_create(e) + # partition name only works for gpt, for msdos that becomes the part-type (primary, extended, logical) + name_command = 'name {idx} {name}'.format(idx=self.get_index(), name=self.name) + log_check_call(['parted', '--script', e.volume.device_path, + '--', name_command]) diff --git a/bootstrapvz/base/fs/partitions/gpt_swap.py b/bootstrapvz/base/fs/partitions/gpt_swap.py index 0419444..3dd2c53 100644 --- a/bootstrapvz/base/fs/partitions/gpt_swap.py +++ b/bootstrapvz/base/fs/partitions/gpt_swap.py @@ -3,15 +3,15 @@ from gpt import GPTPartition class GPTSwapPartition(GPTPartition): - """Represents a GPT swap partition - """ + """Represents a GPT swap partition + """ - def __init__(self, size, previous): - """ - :param Bytes size: Size of the partition - :param BasePartition previous: The partition that preceeds this one - """ - super(GPTSwapPartition, self).__init__(size, 'swap', None, 'swap', previous) + def __init__(self, size, previous): + """ + :param Bytes size: Size of the partition + :param BasePartition previous: The partition that preceeds this one + """ + super(GPTSwapPartition, self).__init__(size, 'swap', None, 'swap', previous) - def _before_format(self, e): - log_check_call(['mkswap', self.device_path]) + def _before_format(self, e): + log_check_call(['mkswap', self.device_path]) diff --git a/bootstrapvz/base/fs/partitions/mount.py 
b/bootstrapvz/base/fs/partitions/mount.py index 7ac7e4b..5055e96 100644 --- a/bootstrapvz/base/fs/partitions/mount.py +++ b/bootstrapvz/base/fs/partitions/mount.py @@ -4,46 +4,46 @@ from bootstrapvz.common.tools import log_check_call class Mount(object): - """Represents a mount into the partition - """ - def __init__(self, source, destination, opts): - """ - :param str,AbstractPartition source: The path from where we mount or a partition - :param str destination: The path of the mountpoint - :param list opts: List of options to pass to the mount command - """ - self.source = source - self.destination = destination - self.opts = opts + """Represents a mount into the partition + """ + def __init__(self, source, destination, opts): + """ + :param str,AbstractPartition source: The path from where we mount or a partition + :param str destination: The path of the mountpoint + :param list opts: List of options to pass to the mount command + """ + self.source = source + self.destination = destination + self.opts = opts - def mount(self, prefix): - """Performs the mount operation or forwards it to another partition + def mount(self, prefix): + """Performs the mount operation or forwards it to another partition - :param str prefix: Path prefix of the mountpoint - """ - mount_dir = os.path.join(prefix, self.destination) - # If the source is another partition, we tell that partition to mount itself - if isinstance(self.source, AbstractPartition): - self.source.mount(destination=mount_dir) - else: - log_check_call(['mount'] + self.opts + [self.source, mount_dir]) - self.mount_dir = mount_dir + :param str prefix: Path prefix of the mountpoint + """ + mount_dir = os.path.join(prefix, self.destination) + # If the source is another partition, we tell that partition to mount itself + if isinstance(self.source, AbstractPartition): + self.source.mount(destination=mount_dir) + else: + log_check_call(['mount'] + self.opts + [self.source, mount_dir]) + self.mount_dir = mount_dir - def 
unmount(self): - """Performs the unmount operation or asks the partition to unmount itself - """ - # If its a partition, it can unmount itself - if isinstance(self.source, AbstractPartition): - self.source.unmount() - else: - log_check_call(['umount', self.mount_dir]) - del self.mount_dir + def unmount(self): + """Performs the unmount operation or asks the partition to unmount itself + """ + # If its a partition, it can unmount itself + if isinstance(self.source, AbstractPartition): + self.source.unmount() + else: + log_check_call(['umount', self.mount_dir]) + del self.mount_dir - def __getstate__(self): - state = self.__dict__.copy() - state['__class__'] = self.__module__ + '.' + self.__class__.__name__ - return state + def __getstate__(self): + state = self.__dict__.copy() + state['__class__'] = self.__module__ + '.' + self.__class__.__name__ + return state - def __setstate__(self, state): - for key in state: - self.__dict__[key] = state[key] + def __setstate__(self, state): + for key in state: + self.__dict__[key] = state[key] diff --git a/bootstrapvz/base/fs/partitions/msdos.py b/bootstrapvz/base/fs/partitions/msdos.py index cb7d96d..ad1dd6d 100644 --- a/bootstrapvz/base/fs/partitions/msdos.py +++ b/bootstrapvz/base/fs/partitions/msdos.py @@ -2,6 +2,6 @@ from base import BasePartition class MSDOSPartition(BasePartition): - """Represents an MS-DOS partition - """ - pass + """Represents an MS-DOS partition + """ + pass diff --git a/bootstrapvz/base/fs/partitions/msdos_swap.py b/bootstrapvz/base/fs/partitions/msdos_swap.py index 5c01f68..24c40d2 100644 --- a/bootstrapvz/base/fs/partitions/msdos_swap.py +++ b/bootstrapvz/base/fs/partitions/msdos_swap.py @@ -3,15 +3,15 @@ from msdos import MSDOSPartition class MSDOSSwapPartition(MSDOSPartition): - """Represents a MS-DOS swap partition - """ + """Represents a MS-DOS swap partition + """ - def __init__(self, size, previous): - """ - :param Bytes size: Size of the partition - :param BasePartition previous: The 
partition that preceeds this one - """ - super(MSDOSSwapPartition, self).__init__(size, 'swap', None, previous) + def __init__(self, size, previous): + """ + :param Bytes size: Size of the partition + :param BasePartition previous: The partition that preceeds this one + """ + super(MSDOSSwapPartition, self).__init__(size, 'swap', None, previous) - def _before_format(self, e): - log_check_call(['mkswap', self.device_path]) + def _before_format(self, e): + log_check_call(['mkswap', self.device_path]) diff --git a/bootstrapvz/base/fs/partitions/single.py b/bootstrapvz/base/fs/partitions/single.py index e10b74c..dfe0c2c 100644 --- a/bootstrapvz/base/fs/partitions/single.py +++ b/bootstrapvz/base/fs/partitions/single.py @@ -2,14 +2,14 @@ from abstract import AbstractPartition class SinglePartition(AbstractPartition): - """Represents a single virtual partition on an unpartitioned volume - """ + """Represents a single virtual partition on an unpartitioned volume + """ - def get_start(self): - """Gets the starting byte of this partition + def get_start(self): + """Gets the starting byte of this partition - :return: The starting byte of this partition - :rtype: Sectors - """ - from bootstrapvz.common.sectors import Sectors - return Sectors(0, self.size.sector_size) + :return: The starting byte of this partition + :rtype: Sectors + """ + from bootstrapvz.common.sectors import Sectors + return Sectors(0, self.size.sector_size) diff --git a/bootstrapvz/base/fs/partitions/unformatted.py b/bootstrapvz/base/fs/partitions/unformatted.py index 39a146e..73af32d 100644 --- a/bootstrapvz/base/fs/partitions/unformatted.py +++ b/bootstrapvz/base/fs/partitions/unformatted.py @@ -2,19 +2,19 @@ from base import BasePartition class UnformattedPartition(BasePartition): - """Represents an unformatted partition - It cannot be mounted - """ + """Represents an unformatted partition + It cannot be mounted + """ - # The states for our state machine. It can only be mapped, not mounted. 
- events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'}, - {'name': 'map', 'src': 'unmapped', 'dst': 'mapped'}, - {'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'}, - ] + # The states for our state machine. It can only be mapped, not mounted. + events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'}, + {'name': 'map', 'src': 'unmapped', 'dst': 'mapped'}, + {'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'}, + ] - def __init__(self, size, previous): - """ - :param Bytes size: Size of the partition - :param BasePartition previous: The partition that preceeds this one - """ - super(UnformattedPartition, self).__init__(size, None, None, previous) + def __init__(self, size, previous): + """ + :param Bytes size: Size of the partition + :param BasePartition previous: The partition that preceeds this one + """ + super(UnformattedPartition, self).__init__(size, None, None, previous) diff --git a/bootstrapvz/base/fs/volume.py b/bootstrapvz/base/fs/volume.py index 38d4991..a9e4cd8 100644 --- a/bootstrapvz/base/fs/volume.py +++ b/bootstrapvz/base/fs/volume.py @@ -6,131 +6,131 @@ from partitionmaps.none import NoPartitions class Volume(FSMProxy): - """Represents an abstract volume. - This class is a finite state machine and represents the state of the real volume. - """ + """Represents an abstract volume. + This class is a finite state machine and represents the state of the real volume. 
+ """ - __metaclass__ = ABCMeta + __metaclass__ = ABCMeta - # States this volume can be in - events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'detached'}, - {'name': 'attach', 'src': 'detached', 'dst': 'attached'}, - {'name': 'link_dm_node', 'src': 'attached', 'dst': 'linked'}, - {'name': 'unlink_dm_node', 'src': 'linked', 'dst': 'attached'}, - {'name': 'detach', 'src': 'attached', 'dst': 'detached'}, - {'name': 'delete', 'src': 'detached', 'dst': 'deleted'}, - ] + # States this volume can be in + events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'detached'}, + {'name': 'attach', 'src': 'detached', 'dst': 'attached'}, + {'name': 'link_dm_node', 'src': 'attached', 'dst': 'linked'}, + {'name': 'unlink_dm_node', 'src': 'linked', 'dst': 'attached'}, + {'name': 'detach', 'src': 'attached', 'dst': 'detached'}, + {'name': 'delete', 'src': 'detached', 'dst': 'deleted'}, + ] - def __init__(self, partition_map): - """ - :param PartitionMap partition_map: The partition map for the volume - """ - # Path to the volume - self.device_path = None - # The partition map - self.partition_map = partition_map - # The size of the volume as reported by the partition map - self.size = self.partition_map.get_total_size() + def __init__(self, partition_map): + """ + :param PartitionMap partition_map: The partition map for the volume + """ + # Path to the volume + self.device_path = None + # The partition map + self.partition_map = partition_map + # The size of the volume as reported by the partition map + self.size = self.partition_map.get_total_size() - # Before detaching, check that nothing would block the detachment - callbacks = {'onbeforedetach': self._check_blocking} - if isinstance(self.partition_map, NoPartitions): - # When the volume has no partitions, the virtual root partition path is equal to that of the volume - # Update that path whenever the path to the volume changes - def set_dev_path(e): - self.partition_map.root.device_path = self.device_path - 
callbacks['onafterattach'] = set_dev_path - callbacks['onafterdetach'] = set_dev_path # Will become None - callbacks['onlink_dm_node'] = set_dev_path - callbacks['onunlink_dm_node'] = set_dev_path + # Before detaching, check that nothing would block the detachment + callbacks = {'onbeforedetach': self._check_blocking} + if isinstance(self.partition_map, NoPartitions): + # When the volume has no partitions, the virtual root partition path is equal to that of the volume + # Update that path whenever the path to the volume changes + def set_dev_path(e): + self.partition_map.root.device_path = self.device_path + callbacks['onafterattach'] = set_dev_path + callbacks['onafterdetach'] = set_dev_path # Will become None + callbacks['onlink_dm_node'] = set_dev_path + callbacks['onunlink_dm_node'] = set_dev_path - # Create the configuration for our finite state machine - cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': callbacks} - super(Volume, self).__init__(cfg) + # Create the configuration for our finite state machine + cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': callbacks} + super(Volume, self).__init__(cfg) - def _after_create(self, e): - if isinstance(self.partition_map, NoPartitions): - # When the volume has no partitions, the virtual root partition - # is essentially created when the volume is created, forward that creation event. - self.partition_map.root.create() + def _after_create(self, e): + if isinstance(self.partition_map, NoPartitions): + # When the volume has no partitions, the virtual root partition + # is essentially created when the volume is created, forward that creation event. 
+ self.partition_map.root.create() - def _check_blocking(self, e): - """Checks whether the volume is blocked + def _check_blocking(self, e): + """Checks whether the volume is blocked - :raises VolumeError: When the volume is blocked from being detached - """ - # Only the partition map can block the volume - if self.partition_map.is_blocking(): - raise VolumeError('The partitionmap prevents the detach procedure') + :raises VolumeError: When the volume is blocked from being detached + """ + # Only the partition map can block the volume + if self.partition_map.is_blocking(): + raise VolumeError('The partitionmap prevents the detach procedure') - def _before_link_dm_node(self, e): - """Links the volume using the device mapper - This allows us to create a 'window' into the volume that acts like a volume in itself. - Mainly it is used to fool grub into thinking that it is working with a real volume, - rather than a loopback device or a network block device. + def _before_link_dm_node(self, e): + """Links the volume using the device mapper + This allows us to create a 'window' into the volume that acts like a volume in itself. + Mainly it is used to fool grub into thinking that it is working with a real volume, + rather than a loopback device or a network block device. 
- :param _e_obj e: Event object containing arguments to create() + :param _e_obj e: Event object containing arguments to create() - Keyword arguments to link_dm_node() are: + Keyword arguments to link_dm_node() are: - :param int logical_start_sector: The sector the volume should start at in the new volume - :param int start_sector: The offset at which the volume should begin to be mapped in the new volume - :param int sectors: The number of sectors that should be mapped + :param int logical_start_sector: The sector the volume should start at in the new volume + :param int start_sector: The offset at which the volume should begin to be mapped in the new volume + :param int sectors: The number of sectors that should be mapped - Read more at: http://manpages.debian.org/cgi-bin/man.cgi?query=dmsetup&apropos=0&sektion=0&manpath=Debian+7.0+wheezy&format=html&locale=en + Read more at: http://manpages.debian.org/cgi-bin/man.cgi?query=dmsetup&apropos=0&sektion=0&manpath=Debian+7.0+wheezy&format=html&locale=en - :raises VolumeError: When a free block device cannot be found. - """ - import os.path - from bootstrapvz.common.fs import get_partitions - # Fetch information from /proc/partitions - proc_partitions = get_partitions() - device_name = os.path.basename(self.device_path) - device_partition = proc_partitions[device_name] + :raises VolumeError: When a free block device cannot be found. 
+ """ + import os.path + from bootstrapvz.common.fs import get_partitions + # Fetch information from /proc/partitions + proc_partitions = get_partitions() + device_name = os.path.basename(self.device_path) + device_partition = proc_partitions[device_name] - # The sector the volume should start at in the new volume - logical_start_sector = getattr(e, 'logical_start_sector', 0) + # The sector the volume should start at in the new volume + logical_start_sector = getattr(e, 'logical_start_sector', 0) - # The offset at which the volume should begin to be mapped in the new volume - start_sector = getattr(e, 'start_sector', 0) + # The offset at which the volume should begin to be mapped in the new volume + start_sector = getattr(e, 'start_sector', 0) - # The number of sectors that should be mapped - sectors = getattr(e, 'sectors', int(self.size) - start_sector) + # The number of sectors that should be mapped + sectors = getattr(e, 'sectors', int(self.size) - start_sector) - # This is the table we send to dmsetup, so that it may create a device mapping for us. - table = ('{log_start_sec} {sectors} linear {major}:{minor} {start_sec}' - .format(log_start_sec=logical_start_sector, - sectors=sectors, - major=device_partition['major'], - minor=device_partition['minor'], - start_sec=start_sector)) - import string - import os.path - # Figure out the device letter and path - for letter in string.ascii_lowercase: - dev_name = 'vd' + letter - dev_path = os.path.join('/dev/mapper', dev_name) - if not os.path.exists(dev_path): - self.dm_node_name = dev_name - self.dm_node_path = dev_path - break + # This is the table we send to dmsetup, so that it may create a device mapping for us. 
+ table = ('{log_start_sec} {sectors} linear {major}:{minor} {start_sec}' + .format(log_start_sec=logical_start_sector, + sectors=sectors, + major=device_partition['major'], + minor=device_partition['minor'], + start_sec=start_sector)) + import string + import os.path + # Figure out the device letter and path + for letter in string.ascii_lowercase: + dev_name = 'vd' + letter + dev_path = os.path.join('/dev/mapper', dev_name) + if not os.path.exists(dev_path): + self.dm_node_name = dev_name + self.dm_node_path = dev_path + break - if not hasattr(self, 'dm_node_name'): - raise VolumeError('Unable to find a free block device path for mounting the bootstrap volume') + if not hasattr(self, 'dm_node_name'): + raise VolumeError('Unable to find a free block device path for mounting the bootstrap volume') - # Create the device mapping - log_check_call(['dmsetup', 'create', self.dm_node_name], table) - # Update the device_path but remember the old one for when we unlink the volume again - self.unlinked_device_path = self.device_path - self.device_path = self.dm_node_path + # Create the device mapping + log_check_call(['dmsetup', 'create', self.dm_node_name], table) + # Update the device_path but remember the old one for when we unlink the volume again + self.unlinked_device_path = self.device_path + self.device_path = self.dm_node_path - def _before_unlink_dm_node(self, e): - """Unlinks the device mapping - """ - log_check_call(['dmsetup', 'remove', self.dm_node_name]) - # Reset the device_path - self.device_path = self.unlinked_device_path - # Delete the no longer valid information - del self.unlinked_device_path - del self.dm_node_name - del self.dm_node_path + def _before_unlink_dm_node(self, e): + """Unlinks the device mapping + """ + log_check_call(['dmsetup', 'remove', self.dm_node_name]) + # Reset the device_path + self.device_path = self.unlinked_device_path + # Delete the no longer valid information + del self.unlinked_device_path + del self.dm_node_name + del 
self.dm_node_path diff --git a/bootstrapvz/base/log.py b/bootstrapvz/base/log.py index 8b01b04..5c9a441 100644 --- a/bootstrapvz/base/log.py +++ b/bootstrapvz/base/log.py @@ -5,100 +5,100 @@ import logging def get_console_handler(debug, colorize): - """Returns a log handler for the console - The handler color codes the different log levels + """Returns a log handler for the console + The handler color codes the different log levels - :params bool debug: Whether to set the log level to DEBUG (otherwise INFO) - :params bool colorize: Whether to colorize console output - :return: The console logging handler - """ - # Create a console log handler - import sys - console_handler = logging.StreamHandler(sys.stderr) - if colorize: - # We want to colorize the output to the console, so we add a formatter - console_handler.setFormatter(ColorFormatter()) - # Set the log level depending on the debug argument - if debug: - console_handler.setLevel(logging.DEBUG) - else: - console_handler.setLevel(logging.INFO) - return console_handler + :params bool debug: Whether to set the log level to DEBUG (otherwise INFO) + :params bool colorize: Whether to colorize console output + :return: The console logging handler + """ + # Create a console log handler + import sys + console_handler = logging.StreamHandler(sys.stderr) + if colorize: + # We want to colorize the output to the console, so we add a formatter + console_handler.setFormatter(ColorFormatter()) + # Set the log level depending on the debug argument + if debug: + console_handler.setLevel(logging.DEBUG) + else: + console_handler.setLevel(logging.INFO) + return console_handler def get_file_handler(path, debug): - """Returns a log handler for the given path - If the parent directory of the logpath does not exist it will be created - The handler outputs relative timestamps (to when it was created) + """Returns a log handler for the given path + If the parent directory of the logpath does not exist it will be created + The handler 
outputs relative timestamps (to when it was created) - :params str path: The full path to the logfile - :params bool debug: Whether to set the log level to DEBUG (otherwise INFO) - :return: The file logging handler - """ - import os.path - if not os.path.exists(os.path.dirname(path)): - os.makedirs(os.path.dirname(path)) - # Create the log handler - file_handler = logging.FileHandler(path) - # Absolute timestamps are rather useless when bootstrapping, it's much more interesting - # to see how long things take, so we log in a relative format instead - file_handler.setFormatter(FileFormatter('[%(relativeCreated)s] %(levelname)s: %(message)s')) - # The file log handler always logs everything - file_handler.setLevel(logging.DEBUG) - return file_handler + :params str path: The full path to the logfile + :params bool debug: Whether to set the log level to DEBUG (otherwise INFO) + :return: The file logging handler + """ + import os.path + if not os.path.exists(os.path.dirname(path)): + os.makedirs(os.path.dirname(path)) + # Create the log handler + file_handler = logging.FileHandler(path) + # Absolute timestamps are rather useless when bootstrapping, it's much more interesting + # to see how long things take, so we log in a relative format instead + file_handler.setFormatter(FileFormatter('[%(relativeCreated)s] %(levelname)s: %(message)s')) + # The file log handler always logs everything + file_handler.setLevel(logging.DEBUG) + return file_handler def get_log_filename(manifest_path): - """Returns the path to a logfile given a manifest - The logfile name is constructed from the current timestamp and the basename of the manifest + """Returns the path to a logfile given a manifest + The logfile name is constructed from the current timestamp and the basename of the manifest - :param str manifest_path: The path to the manifest - :return: The path to the logfile - :rtype: str - """ - import os.path - from datetime import datetime + :param str manifest_path: The path to the 
manifest + :return: The path to the logfile + :rtype: str + """ + import os.path + from datetime import datetime - manifest_basename = os.path.basename(manifest_path) - manifest_name, _ = os.path.splitext(manifest_basename) - timestamp = datetime.now().strftime('%Y%m%d%H%M%S') - filename = '{timestamp}_{name}.log'.format(timestamp=timestamp, name=manifest_name) - return filename + manifest_basename = os.path.basename(manifest_path) + manifest_name, _ = os.path.splitext(manifest_basename) + timestamp = datetime.now().strftime('%Y%m%d%H%M%S') + filename = '{timestamp}_{name}.log'.format(timestamp=timestamp, name=manifest_name) + return filename class SourceFormatter(logging.Formatter): - """Adds a [source] tag to the log message if it exists - The python docs suggest using a LoggingAdapter, but that would mean we'd - have to use it everywhere we log something (and only when called remotely), - which is not feasible. - """ + """Adds a [source] tag to the log message if it exists + The python docs suggest using a LoggingAdapter, but that would mean we'd + have to use it everywhere we log something (and only when called remotely), + which is not feasible. 
+ """ - def format(self, record): - extra = getattr(record, 'extra', {}) - if 'source' in extra: - record.msg = '[{source}] {message}'.format(source=record.extra['source'], - message=record.msg) - return super(SourceFormatter, self).format(record) + def format(self, record): + extra = getattr(record, 'extra', {}) + if 'source' in extra: + record.msg = '[{source}] {message}'.format(source=record.extra['source'], + message=record.msg) + return super(SourceFormatter, self).format(record) class ColorFormatter(SourceFormatter): - """Colorizes log messages depending on the loglevel - """ - level_colors = {logging.ERROR: 'red', - logging.WARNING: 'magenta', - logging.INFO: 'blue', - } + """Colorizes log messages depending on the loglevel + """ + level_colors = {logging.ERROR: 'red', + logging.WARNING: 'magenta', + logging.INFO: 'blue', + } - def format(self, record): - # Colorize the message if we have a color for it (DEBUG has no color) - from termcolor import colored - record.msg = colored(record.msg, self.level_colors.get(record.levelno, None)) - return super(ColorFormatter, self).format(record) + def format(self, record): + # Colorize the message if we have a color for it (DEBUG has no color) + from termcolor import colored + record.msg = colored(record.msg, self.level_colors.get(record.levelno, None)) + return super(ColorFormatter, self).format(record) class FileFormatter(SourceFormatter): - """Formats log statements for output to file - Currently this is just a stub - """ - def format(self, record): - return super(FileFormatter, self).format(record) + """Formats log statements for output to file + Currently this is just a stub + """ + def format(self, record): + return super(FileFormatter, self).format(record) diff --git a/bootstrapvz/base/main.py b/bootstrapvz/base/main.py index 74d06aa..192969c 100644 --- a/bootstrapvz/base/main.py +++ b/bootstrapvz/base/main.py @@ -3,37 +3,37 @@ def main(): - """Main function for invoking the bootstrap process + """Main function 
for invoking the bootstrap process - :raises Exception: When the invoking user is not root and --dry-run isn't specified - """ - # Get the commandline arguments - opts = get_opts() + :raises Exception: When the invoking user is not root and --dry-run isn't specified + """ + # Get the commandline arguments + opts = get_opts() - # Require root privileges, except when doing a dry-run where they aren't needed - import os - if os.geteuid() != 0 and not opts['--dry-run']: - raise Exception('This program requires root privileges.') + # Require root privileges, except when doing a dry-run where they aren't needed + import os + if os.geteuid() != 0 and not opts['--dry-run']: + raise Exception('This program requires root privileges.') - # Set up logging - setup_loggers(opts) + # Set up logging + setup_loggers(opts) - # Load the manifest - from manifest import Manifest - manifest = Manifest(path=opts['MANIFEST']) + # Load the manifest + from manifest import Manifest + manifest = Manifest(path=opts['MANIFEST']) - # Everything has been set up, begin the bootstrapping process - run(manifest, - debug=opts['--debug'], - pause_on_error=opts['--pause-on-error'], - dry_run=opts['--dry-run']) + # Everything has been set up, begin the bootstrapping process + run(manifest, + debug=opts['--debug'], + pause_on_error=opts['--pause-on-error'], + dry_run=opts['--dry-run']) def get_opts(): - """Creates an argument parser and returns the arguments it has parsed - """ - import docopt - usage = """bootstrap-vz + """Creates an argument parser and returns the arguments it has parsed + """ + import docopt + usage = """bootstrap-vz Usage: bootstrap-vz [options] MANIFEST @@ -46,97 +46,97 @@ Options: Colorize the console output [default: auto] --debug Print debugging information -h, --help show this help - """ - opts = docopt.docopt(usage) - if opts['--color'] not in ('auto', 'always', 'never'): - raise docopt.DocoptExit('Value of --color must be one of auto, always or never.') - return opts + """ + 
opts = docopt.docopt(usage) + if opts['--color'] not in ('auto', 'always', 'never'): + raise docopt.DocoptExit('Value of --color must be one of auto, always or never.') + return opts def setup_loggers(opts): - """Sets up the file and console loggers + """Sets up the file and console loggers - :params dict opts: Dictionary of options from the commandline - """ - import logging - root = logging.getLogger() - root.setLevel(logging.NOTSET) + :params dict opts: Dictionary of options from the commandline + """ + import logging + root = logging.getLogger() + root.setLevel(logging.NOTSET) - import log - # Log to file unless --log is a single dash - if opts['--log'] != '-': - import os.path - log_filename = log.get_log_filename(opts['MANIFEST']) - logpath = os.path.join(opts['--log'], log_filename) - file_handler = log.get_file_handler(path=logpath, debug=True) - root.addHandler(file_handler) + import log + # Log to file unless --log is a single dash + if opts['--log'] != '-': + import os.path + log_filename = log.get_log_filename(opts['MANIFEST']) + logpath = os.path.join(opts['--log'], log_filename) + file_handler = log.get_file_handler(path=logpath, debug=True) + root.addHandler(file_handler) - if opts['--color'] == 'never': - colorize = False - elif opts['--color'] == 'always': - colorize = True - else: - # If --color=auto (default), decide whether to colorize by whether stderr is a tty. - import os - colorize = os.isatty(2) - console_handler = log.get_console_handler(debug=opts['--debug'], colorize=colorize) - root.addHandler(console_handler) + if opts['--color'] == 'never': + colorize = False + elif opts['--color'] == 'always': + colorize = True + else: + # If --color=auto (default), decide whether to colorize by whether stderr is a tty. 
+ import os + colorize = os.isatty(2) + console_handler = log.get_console_handler(debug=opts['--debug'], colorize=colorize) + root.addHandler(console_handler) def run(manifest, debug=False, pause_on_error=False, dry_run=False): - """Runs the bootstrapping process + """Runs the bootstrapping process - :params Manifest manifest: The manifest to run the bootstrapping process for - :params bool debug: Whether to turn debugging mode on - :params bool pause_on_error: Whether to pause on error, before rollback - :params bool dry_run: Don't actually run the tasks - """ - # Get the tasklist - from tasklist import load_tasks - from tasklist import TaskList - tasks = load_tasks('resolve_tasks', manifest) - tasklist = TaskList(tasks) - # 'resolve_tasks' is the name of the function to call on the provider and plugins + :params Manifest manifest: The manifest to run the bootstrapping process for + :params bool debug: Whether to turn debugging mode on + :params bool pause_on_error: Whether to pause on error, before rollback + :params bool dry_run: Don't actually run the tasks + """ + # Get the tasklist + from tasklist import load_tasks + from tasklist import TaskList + tasks = load_tasks('resolve_tasks', manifest) + tasklist = TaskList(tasks) + # 'resolve_tasks' is the name of the function to call on the provider and plugins - # Create the bootstrap information object that'll be used throughout the bootstrapping process - from bootstrapinfo import BootstrapInformation - bootstrap_info = BootstrapInformation(manifest=manifest, debug=debug) + # Create the bootstrap information object that'll be used throughout the bootstrapping process + from bootstrapinfo import BootstrapInformation + bootstrap_info = BootstrapInformation(manifest=manifest, debug=debug) - import logging - log = logging.getLogger(__name__) - try: - # Run all the tasks the tasklist has gathered - tasklist.run(info=bootstrap_info, dry_run=dry_run) - # We're done! 
:-) - log.info('Successfully completed bootstrapping') - except (Exception, KeyboardInterrupt) as e: - # When an error occurs, log it and begin rollback - log.exception(e) - if pause_on_error: - # The --pause-on-error is useful when the user wants to inspect the volume before rollback - raw_input('Press Enter to commence rollback') - log.error('Rolling back') + import logging + log = logging.getLogger(__name__) + try: + # Run all the tasks the tasklist has gathered + tasklist.run(info=bootstrap_info, dry_run=dry_run) + # We're done! :-) + log.info('Successfully completed bootstrapping') + except (Exception, KeyboardInterrupt) as e: + # When an error occurs, log it and begin rollback + log.exception(e) + if pause_on_error: + # The --pause-on-error is useful when the user wants to inspect the volume before rollback + raw_input('Press Enter to commence rollback') + log.error('Rolling back') - # Create a useful little function for the provider and plugins to use, - # when figuring out what tasks should be added to the rollback list. - def counter_task(taskset, task, counter): - """counter_task() adds the third argument to the rollback tasklist - if the second argument is present in the list of completed tasks + # Create a useful little function for the provider and plugins to use, + # when figuring out what tasks should be added to the rollback list. 
+ def counter_task(taskset, task, counter): + """counter_task() adds the third argument to the rollback tasklist + if the second argument is present in the list of completed tasks - :param set taskset: The taskset to add the rollback task to - :param Task task: The task to look for in the completed tasks list - :param Task counter: The task to add to the rollback tasklist - """ - if task in tasklist.tasks_completed and counter not in tasklist.tasks_completed: - taskset.add(counter) + :param set taskset: The taskset to add the rollback task to + :param Task task: The task to look for in the completed tasks list + :param Task counter: The task to add to the rollback tasklist + """ + if task in tasklist.tasks_completed and counter not in tasklist.tasks_completed: + taskset.add(counter) - # Ask the provider and plugins for tasks they'd like to add to the rollback tasklist - # Any additional arguments beyond the first two are passed directly to the provider and plugins - rollback_tasks = load_tasks('resolve_rollback_tasks', manifest, tasklist.tasks_completed, counter_task) - rollback_tasklist = TaskList(rollback_tasks) + # Ask the provider and plugins for tasks they'd like to add to the rollback tasklist + # Any additional arguments beyond the first two are passed directly to the provider and plugins + rollback_tasks = load_tasks('resolve_rollback_tasks', manifest, tasklist.tasks_completed, counter_task) + rollback_tasklist = TaskList(rollback_tasks) - # Run the rollback tasklist - rollback_tasklist.run(info=bootstrap_info, dry_run=dry_run) - log.info('Successfully completed rollback') - raise - return bootstrap_info + # Run the rollback tasklist + rollback_tasklist.run(info=bootstrap_info, dry_run=dry_run) + log.info('Successfully completed rollback') + raise + return bootstrap_info diff --git a/bootstrapvz/base/manifest.py b/bootstrapvz/base/manifest.py index cd7acaa..c83dfd6 100644 --- a/bootstrapvz/base/manifest.py +++ b/bootstrapvz/base/manifest.py @@ -9,150 +9,150 
@@ log = logging.getLogger(__name__) class Manifest(object): - """This class holds all the information that providers and plugins need - to perform the bootstrapping process. All actions that are taken originate from - here. The manifest shall not be modified after it has been loaded. - Currently, immutability is not enforced and it would require a fair amount of code - to enforce it, instead we just rely on tasks behaving properly. - """ + """This class holds all the information that providers and plugins need + to perform the bootstrapping process. All actions that are taken originate from + here. The manifest shall not be modified after it has been loaded. + Currently, immutability is not enforced and it would require a fair amount of code + to enforce it, instead we just rely on tasks behaving properly. + """ - def __init__(self, path=None, data=None): - """Initializer: Given a path we load, validate and parse the manifest. - To create the manifest from dynamic data instead of the contents of a file, - provide a properly constructed dict as the data argument. + def __init__(self, path=None, data=None): + """Initializer: Given a path we load, validate and parse the manifest. + To create the manifest from dynamic data instead of the contents of a file, + provide a properly constructed dict as the data argument. 
- :param str path: The path to the manifest (ignored, when `data' is provided) - :param str data: The manifest data, if it is not None, it will be used instead of the contents of `path' - """ - if path is None and data is None: - raise ManifestError('`path\' or `data\' must be provided') - self.path = path + :param str path: The path to the manifest (ignored, when `data' is provided) + :param str data: The manifest data, if it is not None, it will be used instead of the contents of `path' + """ + if path is None and data is None: + raise ManifestError('`path\' or `data\' must be provided') + self.path = path - import os.path - self.metaschema = load_data(os.path.normpath(os.path.join(os.path.dirname(__file__), - 'metaschema.json'))) + import os.path + self.metaschema = load_data(os.path.normpath(os.path.join(os.path.dirname(__file__), + 'metaschema.json'))) - self.load_data(data) - self.load_modules() - self.validate() - self.parse() + self.load_data(data) + self.load_modules() + self.validate() + self.parse() - def load_data(self, data=None): - """Loads the manifest and performs a basic validation. - This function reads the manifest and performs some basic validation of - the manifest itself to ensure that the properties required for initalization are accessible - (otherwise the user would be presented with some cryptic error messages). - """ - if data is None: - self.data = load_data(self.path) - else: - self.data = data + def load_data(self, data=None): + """Loads the manifest and performs a basic validation. + This function reads the manifest and performs some basic validation of + the manifest itself to ensure that the properties required for initalization are accessible + (otherwise the user would be presented with some cryptic error messages). + """ + if data is None: + self.data = load_data(self.path) + else: + self.data = data - from . 
import validate_manifest - # Validate the manifest with the base validation function in __init__ - validate_manifest(self.data, self.schema_validator, self.validation_error) + from . import validate_manifest + # Validate the manifest with the base validation function in __init__ + validate_manifest(self.data, self.schema_validator, self.validation_error) - def load_modules(self): - """Loads the provider and the plugins. - """ - # Get the provider name from the manifest and load the corresponding module - provider_modname = 'bootstrapvz.providers.' + self.data['provider']['name'] - log.debug('Loading provider ' + self.data['provider']['name']) - # Create a modules dict that contains the loaded provider and plugins - import importlib - self.modules = {'provider': importlib.import_module(provider_modname), - 'plugins': [], - } - # Run through all the plugins mentioned in the manifest and load them - from pkg_resources import iter_entry_points - if 'plugins' in self.data: - for plugin_name in self.data['plugins'].keys(): - log.debug('Loading plugin ' + plugin_name) - try: - # Internal bootstrap-vz plugins take precedence wrt. plugin name - modname = 'bootstrapvz.plugins.' + plugin_name - plugin = importlib.import_module(modname) - except ImportError: - entry_points = list(iter_entry_points('bootstrapvz.plugins', name=plugin_name)) - num_entry_points = len(entry_points) - if num_entry_points < 1: - raise - if num_entry_points > 1: - msg = ('Unable to load plugin {name}, ' - 'there are {num} entry points to choose from.' - .format(name=plugin_name, num=num_entry_points)) - raise ImportError(msg) - plugin = entry_points[0].load() - self.modules['plugins'].append(plugin) + def load_modules(self): + """Loads the provider and the plugins. + """ + # Get the provider name from the manifest and load the corresponding module + provider_modname = 'bootstrapvz.providers.' 
+ self.data['provider']['name'] + log.debug('Loading provider ' + self.data['provider']['name']) + # Create a modules dict that contains the loaded provider and plugins + import importlib + self.modules = {'provider': importlib.import_module(provider_modname), + 'plugins': [], + } + # Run through all the plugins mentioned in the manifest and load them + from pkg_resources import iter_entry_points + if 'plugins' in self.data: + for plugin_name in self.data['plugins'].keys(): + log.debug('Loading plugin ' + plugin_name) + try: + # Internal bootstrap-vz plugins take precedence wrt. plugin name + modname = 'bootstrapvz.plugins.' + plugin_name + plugin = importlib.import_module(modname) + except ImportError: + entry_points = list(iter_entry_points('bootstrapvz.plugins', name=plugin_name)) + num_entry_points = len(entry_points) + if num_entry_points < 1: + raise + if num_entry_points > 1: + msg = ('Unable to load plugin {name}, ' + 'there are {num} entry points to choose from.' + .format(name=plugin_name, num=num_entry_points)) + raise ImportError(msg) + plugin = entry_points[0].load() + self.modules['plugins'].append(plugin) - def validate(self): - """Validates the manifest using the provider and plugin validation functions. - Plugins are not required to have a validate_manifest function - """ + def validate(self): + """Validates the manifest using the provider and plugin validation functions. 
+ Plugins are not required to have a validate_manifest function + """ - # Run the provider validation - self.modules['provider'].validate_manifest(self.data, self.schema_validator, self.validation_error) - # Run the validation function for any plugin that has it - for plugin in self.modules['plugins']: - validate = getattr(plugin, 'validate_manifest', None) - if callable(validate): - validate(self.data, self.schema_validator, self.validation_error) + # Run the provider validation + self.modules['provider'].validate_manifest(self.data, self.schema_validator, self.validation_error) + # Run the validation function for any plugin that has it + for plugin in self.modules['plugins']: + validate = getattr(plugin, 'validate_manifest', None) + if callable(validate): + validate(self.data, self.schema_validator, self.validation_error) - def parse(self): - """Parses the manifest. - Well... "parsing" is a big word. - The function really just sets up some convenient attributes so that tasks - don't have to access information with info.manifest.data['section'] - but can do it with info.manifest.section. - """ - self.name = self.data['name'] - self.provider = self.data['provider'] - self.bootstrapper = self.data['bootstrapper'] - self.volume = self.data['volume'] - self.system = self.data['system'] - from bootstrapvz.common.releases import get_release - self.release = get_release(self.system['release']) - # The packages and plugins section is not required - self.packages = self.data['packages'] if 'packages' in self.data else {} - self.plugins = self.data['plugins'] if 'plugins' in self.data else {} + def parse(self): + """Parses the manifest. + Well... "parsing" is a big word. + The function really just sets up some convenient attributes so that tasks + don't have to access information with info.manifest.data['section'] + but can do it with info.manifest.section. 
+ """ + self.name = self.data['name'] + self.provider = self.data['provider'] + self.bootstrapper = self.data['bootstrapper'] + self.volume = self.data['volume'] + self.system = self.data['system'] + from bootstrapvz.common.releases import get_release + self.release = get_release(self.system['release']) + # The packages and plugins section is not required + self.packages = self.data['packages'] if 'packages' in self.data else {} + self.plugins = self.data['plugins'] if 'plugins' in self.data else {} - def schema_validator(self, data, schema_path): - """This convenience function is passed around to all the validation functions - so that they may run a json-schema validation by giving it the data and a path to the schema. + def schema_validator(self, data, schema_path): + """This convenience function is passed around to all the validation functions + so that they may run a json-schema validation by giving it the data and a path to the schema. - :param dict data: Data to validate (normally the manifest data) - :param str schema_path: Path to the json-schema to use for validation - """ - import jsonschema + :param dict data: Data to validate (normally the manifest data) + :param str schema_path: Path to the json-schema to use for validation + """ + import jsonschema - schema = load_data(schema_path) + schema = load_data(schema_path) - try: - jsonschema.validate(schema, self.metaschema) - jsonschema.validate(data, schema) - except jsonschema.ValidationError as e: - self.validation_error(e.message, e.path) + try: + jsonschema.validate(schema, self.metaschema) + jsonschema.validate(data, schema) + except jsonschema.ValidationError as e: + self.validation_error(e.message, e.path) - def validation_error(self, message, data_path=None): - """This function is passed to all validation functions so that they may - raise a validation error because a custom validation of the manifest failed. 
+ def validation_error(self, message, data_path=None): + """This function is passed to all validation functions so that they may + raise a validation error because a custom validation of the manifest failed. - :param str message: Message to user about the error - :param list data_path: A path to the location in the manifest where the error occurred - :raises ManifestError: With absolute certainty - """ - raise ManifestError(message, self.path, data_path) + :param str message: Message to user about the error + :param list data_path: A path to the location in the manifest where the error occurred + :raises ManifestError: With absolute certainty + """ + raise ManifestError(message, self.path, data_path) - def __getstate__(self): - return {'__class__': self.__module__ + '.' + self.__class__.__name__, - 'path': self.path, - 'metaschema': self.metaschema, - 'data': self.data} + def __getstate__(self): + return {'__class__': self.__module__ + '.' + self.__class__.__name__, + 'path': self.path, + 'metaschema': self.metaschema, + 'data': self.data} - def __setstate__(self, state): - self.path = state['path'] - self.metaschema = state['metaschema'] - self.load_data(state['data']) - self.load_modules() - self.validate() - self.parse() + def __setstate__(self, state): + self.path = state['path'] + self.metaschema = state['metaschema'] + self.load_data(state['data']) + self.load_modules() + self.validate() + self.parse() diff --git a/bootstrapvz/base/phase.py b/bootstrapvz/base/phase.py index bc3efdb..6113279 100644 --- a/bootstrapvz/base/phase.py +++ b/bootstrapvz/base/phase.py @@ -1,35 +1,35 @@ class Phase(object): - """The Phase class represents a phase a task may be in. - It has no function other than to act as an anchor in the task graph. - All phases are instantiated in common.phases - """ + """The Phase class represents a phase a task may be in. + It has no function other than to act as an anchor in the task graph. 
+ All phases are instantiated in common.phases + """ - def __init__(self, name, description): - # The name of the phase - self.name = name - # The description of the phase (currently not used anywhere) - self.description = description + def __init__(self, name, description): + # The name of the phase + self.name = name + # The description of the phase (currently not used anywhere) + self.description = description - def pos(self): - """Gets the position of the phase + def pos(self): + """Gets the position of the phase - :return: The positional index of the phase in relation to the other phases - :rtype: int - """ - from bootstrapvz.common.phases import order - return next(i for i, phase in enumerate(order) if phase is self) + :return: The positional index of the phase in relation to the other phases + :rtype: int + """ + from bootstrapvz.common.phases import order + return next(i for i, phase in enumerate(order) if phase is self) - def __cmp__(self, other): - """Compares the phase order in relation to the other phases - :return int: - """ - return self.pos() - other.pos() + def __cmp__(self, other): + """Compares the phase order in relation to the other phases + :return int: + """ + return self.pos() - other.pos() - def __str__(self): - """ - :return: String representation of the phase - :rtype: str - """ - return self.name + def __str__(self): + """ + :return: String representation of the phase + :rtype: str + """ + return self.name diff --git a/bootstrapvz/base/pkg/exceptions.py b/bootstrapvz/base/pkg/exceptions.py index dc7534b..244edeb 100644 --- a/bootstrapvz/base/pkg/exceptions.py +++ b/bootstrapvz/base/pkg/exceptions.py @@ -1,12 +1,12 @@ class PackageError(Exception): - """Raised when an error occurrs while handling the packageslist - """ - pass + """Raised when an error occurrs while handling the packageslist + """ + pass class SourceError(Exception): - """Raised when an error occurs while handling the sourceslist - """ - pass + """Raised when an error 
occurs while handling the sourceslist + """ + pass diff --git a/bootstrapvz/base/pkg/packagelist.py b/bootstrapvz/base/pkg/packagelist.py index 5d5c0c7..3a45d85 100644 --- a/bootstrapvz/base/pkg/packagelist.py +++ b/bootstrapvz/base/pkg/packagelist.py @@ -1,108 +1,108 @@ class PackageList(object): - """Represents a list of packages - """ + """Represents a list of packages + """ - class Remote(object): - """A remote package with an optional target - """ - def __init__(self, name, target): - """ - :param str name: The name of the package - :param str target: The name of the target release - """ - self.name = name - self.target = target + class Remote(object): + """A remote package with an optional target + """ + def __init__(self, name, target): + """ + :param str name: The name of the package + :param str target: The name of the target release + """ + self.name = name + self.target = target - def __str__(self): - """Converts the package into somehting that apt-get install can parse + def __str__(self): + """Converts the package into somehting that apt-get install can parse - :rtype: str - """ - if self.target is None: - return self.name - else: - return self.name + '/' + self.target + :rtype: str + """ + if self.target is None: + return self.name + else: + return self.name + '/' + self.target - class Local(object): - """A local package - """ - def __init__(self, path): - """ - :param str path: The path to the local package - """ - self.path = path + class Local(object): + """A local package + """ + def __init__(self, path): + """ + :param str path: The path to the local package + """ + self.path = path - def __str__(self): - """ - :return: The path to the local package - :rtype: string - """ - return self.path + def __str__(self): + """ + :return: The path to the local package + :rtype: string + """ + return self.path - def __init__(self, manifest_vars, source_lists): - """ - :param dict manifest_vars: The manifest variables - :param SourceLists source_lists: The 
sourcelists for apt - """ - self.manifest_vars = manifest_vars - self.source_lists = source_lists - # The default_target is the release we are bootstrapping - self.default_target = '{system.release}'.format(**self.manifest_vars) - # The list of packages that should be installed, this is not a set. - # We want to preserve the order in which the packages were added so that local - # packages may be installed in the correct order. - self.install = [] - # A function that filters the install list and only returns remote packages - self.remote = lambda: filter(lambda x: isinstance(x, self.Remote), self.install) + def __init__(self, manifest_vars, source_lists): + """ + :param dict manifest_vars: The manifest variables + :param SourceLists source_lists: The sourcelists for apt + """ + self.manifest_vars = manifest_vars + self.source_lists = source_lists + # The default_target is the release we are bootstrapping + self.default_target = '{system.release}'.format(**self.manifest_vars) + # The list of packages that should be installed, this is not a set. + # We want to preserve the order in which the packages were added so that local + # packages may be installed in the correct order. 
+ self.install = [] + # A function that filters the install list and only returns remote packages + self.remote = lambda: filter(lambda x: isinstance(x, self.Remote), self.install) - def add(self, name, target=None): - """Adds a package to the install list + def add(self, name, target=None): + """Adds a package to the install list - :param str name: The name of the package to install, may contain manifest vars references - :param str target: The name of the target release for the package, may contain manifest vars references + :param str name: The name of the package to install, may contain manifest vars references + :param str target: The name of the target release for the package, may contain manifest vars references - :raises PackageError: When a package of the same name but with a different target has already been added. - :raises PackageError: When the specified target release could not be found. - """ - from exceptions import PackageError - name = name.format(**self.manifest_vars) - if target is not None: - target = target.format(**self.manifest_vars) - # Check if the package has already been added. - # If so, make sure it's the same target and raise a PackageError otherwise - package = next((pkg for pkg in self.remote() if pkg.name == name), None) - if package is not None: - # It's the same target if the target names match or one of the targets is None - # and the other is the default target. 
- same_target = package.target == target - same_target = same_target or package.target is None and target == self.default_target - same_target = same_target or package.target == self.default_target and target is None - if not same_target: - msg = ('The package {name} was already added to the package list, ' - 'but with target release `{target}\' instead of `{add_target}\'' - .format(name=name, target=package.target, add_target=target)) - raise PackageError(msg) - # The package has already been added, skip the checks below - return + :raises PackageError: When a package of the same name but with a different target has already been added. + :raises PackageError: When the specified target release could not be found. + """ + from exceptions import PackageError + name = name.format(**self.manifest_vars) + if target is not None: + target = target.format(**self.manifest_vars) + # Check if the package has already been added. + # If so, make sure it's the same target and raise a PackageError otherwise + package = next((pkg for pkg in self.remote() if pkg.name == name), None) + if package is not None: + # It's the same target if the target names match or one of the targets is None + # and the other is the default target. 
+ same_target = package.target == target + same_target = same_target or package.target is None and target == self.default_target + same_target = same_target or package.target == self.default_target and target is None + if not same_target: + msg = ('The package {name} was already added to the package list, ' + 'but with target release `{target}\' instead of `{add_target}\'' + .format(name=name, target=package.target, add_target=target)) + raise PackageError(msg) + # The package has already been added, skip the checks below + return - # Check if the target exists (unless it's the default target) in the sources list - # raise a PackageError if does not - if target not in (None, self.default_target) and not self.source_lists.target_exists(target): - msg = ('The target release {target} was not found in the sources list').format(target=target) - raise PackageError(msg) + # Check if the target exists (unless it's the default target) in the sources list + # raise a PackageError if does not + if target not in (None, self.default_target) and not self.source_lists.target_exists(target): + msg = ('The target release {target} was not found in the sources list').format(target=target) + raise PackageError(msg) - # Note that we maintain the target value even if it is none. - # This allows us to preserve the semantics of the default target when calling apt-get install - # Why? Try installing nfs-client/wheezy, you can't. It's a virtual package for which you cannot define - # a target release. Only `apt-get install nfs-client` works. - self.install.append(self.Remote(name, target)) + # Note that we maintain the target value even if it is none. + # This allows us to preserve the semantics of the default target when calling apt-get install + # Why? Try installing nfs-client/wheezy, you can't. It's a virtual package for which you cannot define + # a target release. Only `apt-get install nfs-client` works. 
+ self.install.append(self.Remote(name, target)) - def add_local(self, package_path): - """Adds a local package to the installation list + def add_local(self, package_path): + """Adds a local package to the installation list - :param str package_path: Path to the local package, may contain manifest vars references - """ - package_path = package_path.format(**self.manifest_vars) - self.install.append(self.Local(package_path)) + :param str package_path: Path to the local package, may contain manifest vars references + """ + package_path = package_path.format(**self.manifest_vars) + self.install.append(self.Local(package_path)) diff --git a/bootstrapvz/base/pkg/preferenceslist.py b/bootstrapvz/base/pkg/preferenceslist.py index e89c2fa..da855ae 100644 --- a/bootstrapvz/base/pkg/preferenceslist.py +++ b/bootstrapvz/base/pkg/preferenceslist.py @@ -1,42 +1,42 @@ class PreferenceLists(object): - """Represents a list of preferences lists for apt - """ + """Represents a list of preferences lists for apt + """ - def __init__(self, manifest_vars): - """ - :param dict manifest_vars: The manifest variables - """ - # A dictionary with the name of the file in preferences.d as the key - # That values are lists of Preference objects - self.preferences = {} - # Save the manifest variables, we need the later on - self.manifest_vars = manifest_vars + def __init__(self, manifest_vars): + """ + :param dict manifest_vars: The manifest variables + """ + # A dictionary with the name of the file in preferences.d as the key + # That values are lists of Preference objects + self.preferences = {} + # Save the manifest variables, we need the later on + self.manifest_vars = manifest_vars - def add(self, name, preferences): - """Adds a preference to the apt preferences list + def add(self, name, preferences): + """Adds a preference to the apt preferences list - :param str name: Name of the file in preferences.list.d, may contain manifest vars references - :param object preferences: The preferences 
- """ - name = name.format(**self.manifest_vars) - self.preferences[name] = [Preference(p) for p in preferences] + :param str name: Name of the file in preferences.list.d, may contain manifest vars references + :param object preferences: The preferences + """ + name = name.format(**self.manifest_vars) + self.preferences[name] = [Preference(p) for p in preferences] class Preference(object): - """Represents a single preference - """ + """Represents a single preference + """ - def __init__(self, preference): - """ - :param dict preference: A apt preference dictionary - """ - self.preference = preference + def __init__(self, preference): + """ + :param dict preference: A apt preference dictionary + """ + self.preference = preference - def __str__(self): - """Convert the object into a preference block + def __str__(self): + """Convert the object into a preference block - :rtype: str - """ - return "Package: {package}\nPin: {pin}\nPin-Priority: {pin-priority}\n".format(**self.preference) + :rtype: str + """ + return "Package: {package}\nPin: {pin}\nPin-Priority: {pin-priority}\n".format(**self.preference) diff --git a/bootstrapvz/base/pkg/sourceslist.py b/bootstrapvz/base/pkg/sourceslist.py index f872b52..2539611 100644 --- a/bootstrapvz/base/pkg/sourceslist.py +++ b/bootstrapvz/base/pkg/sourceslist.py @@ -1,95 +1,95 @@ class SourceLists(object): - """Represents a list of sources lists for apt - """ + """Represents a list of sources lists for apt + """ - def __init__(self, manifest_vars): - """ - :param dict manifest_vars: The manifest variables - """ - # A dictionary with the name of the file in sources.list.d as the key - # That values are lists of Source objects - self.sources = {} - # Save the manifest variables, we need the later on - self.manifest_vars = manifest_vars + def __init__(self, manifest_vars): + """ + :param dict manifest_vars: The manifest variables + """ + # A dictionary with the name of the file in sources.list.d as the key + # That values are lists 
of Source objects + self.sources = {} + # Save the manifest variables, we need the later on + self.manifest_vars = manifest_vars - def add(self, name, line): - """Adds a source to the apt sources list + def add(self, name, line): + """Adds a source to the apt sources list - :param str name: Name of the file in sources.list.d, may contain manifest vars references - :param str line: The line for the source file, may contain manifest vars references - """ - name = name.format(**self.manifest_vars) - line = line.format(**self.manifest_vars) - if name not in self.sources: - self.sources[name] = [] - self.sources[name].append(Source(line)) + :param str name: Name of the file in sources.list.d, may contain manifest vars references + :param str line: The line for the source file, may contain manifest vars references + """ + name = name.format(**self.manifest_vars) + line = line.format(**self.manifest_vars) + if name not in self.sources: + self.sources[name] = [] + self.sources[name].append(Source(line)) - def target_exists(self, target): - """Checks whether the target exists in the sources list + def target_exists(self, target): + """Checks whether the target exists in the sources list - :param str target: Name of the target to check for, may contain manifest vars references + :param str target: Name of the target to check for, may contain manifest vars references - :return: Whether the target exists - :rtype: bool - """ - target = target.format(**self.manifest_vars) - # Run through all the sources and return True if the target exists - for lines in self.sources.itervalues(): - if target in (source.distribution for source in lines): - return True - return False + :return: Whether the target exists + :rtype: bool + """ + target = target.format(**self.manifest_vars) + # Run through all the sources and return True if the target exists + for lines in self.sources.itervalues(): + if target in (source.distribution for source in lines): + return True + return False class 
Source(object): - """Represents a single source line - """ + """Represents a single source line + """ - def __init__(self, line): - """ - :param str line: A apt source line + def __init__(self, line): + """ + :param str line: A apt source line - :raises SourceError: When the source line cannot be parsed - """ - # Parse the source line and populate the class attributes with it - # The format is taken from `man sources.list` - # or: http://manpages.debian.org/cgi-bin/man.cgi?sektion=5&query=sources.list&apropos=0&manpath=sid&locale=en - import re - regexp = re.compile('^(?Pdeb|deb-src)\s+' - '(\[\s*(?P.+\S)?\s*\]\s+)?' - '(?P\S+)\s+' - '(?P\S+)' - '(\s+(?P.+\S))?\s*$') - match = regexp.match(line).groupdict() - if match is None: - from exceptions import SourceError - raise SourceError('Unable to parse source line: ' + line) - self.type = match['type'] - self.options = [] - if match['options'] is not None: - self.options = re.sub(' +', ' ', match['options']).split(' ') - self.uri = match['uri'] - self.distribution = match['distribution'] - self.components = [] - if match['components'] is not None: - self.components = re.sub(' +', ' ', match['components']).split(' ') + :raises SourceError: When the source line cannot be parsed + """ + # Parse the source line and populate the class attributes with it + # The format is taken from `man sources.list` + # or: http://manpages.debian.org/cgi-bin/man.cgi?sektion=5&query=sources.list&apropos=0&manpath=sid&locale=en + import re + regexp = re.compile('^(?Pdeb|deb-src)\s+' + '(\[\s*(?P.+\S)?\s*\]\s+)?' 
+ '(?P\S+)\s+' + '(?P\S+)' + '(\s+(?P.+\S))?\s*$') + match = regexp.match(line).groupdict() + if match is None: + from exceptions import SourceError + raise SourceError('Unable to parse source line: ' + line) + self.type = match['type'] + self.options = [] + if match['options'] is not None: + self.options = re.sub(' +', ' ', match['options']).split(' ') + self.uri = match['uri'] + self.distribution = match['distribution'] + self.components = [] + if match['components'] is not None: + self.components = re.sub(' +', ' ', match['components']).split(' ') - def __str__(self): - """Convert the object into a source line - This is pretty much the reverse of what we're doing in the initialization function. + def __str__(self): + """Convert the object into a source line + This is pretty much the reverse of what we're doing in the initialization function. - :rtype: str - """ - options = '' - if len(self.options) > 0: - options = ' [{options}]'.format(options=' '.join(self.options)) + :rtype: str + """ + options = '' + if len(self.options) > 0: + options = ' [{options}]'.format(options=' '.join(self.options)) - components = '' - if len(self.components) > 0: - components = ' {components}'.format(components=' '.join(self.components)) + components = '' + if len(self.components) > 0: + components = ' {components}'.format(components=' '.join(self.components)) - return ('{type}{options} {uri} {distribution}{components}' - .format(type=self.type, options=options, - uri=self.uri, distribution=self.distribution, - components=components)) + return ('{type}{options} {uri} {distribution}{components}' + .format(type=self.type, options=options, + uri=self.uri, distribution=self.distribution, + components=components)) diff --git a/bootstrapvz/base/task.py b/bootstrapvz/base/task.py index d38e778..6bd9eba 100644 --- a/bootstrapvz/base/task.py +++ b/bootstrapvz/base/task.py @@ -1,36 +1,36 @@ class Task(object): - """The task class represents a task that can be run. 
- It is merely a wrapper for the run function and should never be instantiated. - """ - # The phase this task is located in. - phase = None - # List of tasks that should run before this task is run - predecessors = [] - # List of tasks that should run after this task has run - successors = [] + """The task class represents a task that can be run. + It is merely a wrapper for the run function and should never be instantiated. + """ + # The phase this task is located in. + phase = None + # List of tasks that should run before this task is run + predecessors = [] + # List of tasks that should run after this task has run + successors = [] - class __metaclass__(type): - """Metaclass to control how the class is coerced into a string - """ - def __repr__(cls): - """ - :return str: The full module path to the Task - """ - return cls.__module__ + '.' + cls.__name__ + class __metaclass__(type): + """Metaclass to control how the class is coerced into a string + """ + def __repr__(cls): + """ + :return str: The full module path to the Task + """ + return cls.__module__ + '.' + cls.__name__ - def __str__(cls): - """ - :return: The full module path to the Task - :rtype: str - """ - return repr(cls) + def __str__(cls): + """ + :return: The full module path to the Task + :rtype: str + """ + return repr(cls) - @classmethod - def run(cls, info): - """The run function, all work is done inside this function + @classmethod + def run(cls, info): + """The run function, all work is done inside this function - :param BootstrapInformation info: The bootstrap info object. - """ - pass + :param BootstrapInformation info: The bootstrap info object. 
+ """ + pass diff --git a/bootstrapvz/base/tasklist.py b/bootstrapvz/base/tasklist.py index 0503e26..5b7c404 100644 --- a/bootstrapvz/base/tasklist.py +++ b/bootstrapvz/base/tasklist.py @@ -7,273 +7,273 @@ log = logging.getLogger(__name__) class TaskList(object): - """The tasklist class aggregates all tasks that should be run - and orders them according to their dependencies. - """ + """The tasklist class aggregates all tasks that should be run + and orders them according to their dependencies. + """ - def __init__(self, tasks): - self.tasks = tasks - self.tasks_completed = [] + def __init__(self, tasks): + self.tasks = tasks + self.tasks_completed = [] - def run(self, info, dry_run=False): - """Converts the taskgraph into a list and runs all tasks in that list + def run(self, info, dry_run=False): + """Converts the taskgraph into a list and runs all tasks in that list - :param dict info: The bootstrap information object - :param bool dry_run: Whether to actually run the tasks or simply step through them - """ - # Get a hold of every task we can find, so that we can topologically sort - # all tasks, rather than just the subset we are going to run. - from bootstrapvz.common import tasks as common_tasks - modules = [common_tasks, info.manifest.modules['provider']] + info.manifest.modules['plugins'] - all_tasks = set(get_all_tasks(modules)) - # Create a list for us to run - task_list = create_list(self.tasks, all_tasks) - # Output the tasklist - log.debug('Tasklist:\n\t' + ('\n\t'.join(map(repr, task_list)))) + :param dict info: The bootstrap information object + :param bool dry_run: Whether to actually run the tasks or simply step through them + """ + # Get a hold of every task we can find, so that we can topologically sort + # all tasks, rather than just the subset we are going to run. 
+ from bootstrapvz.common import tasks as common_tasks + modules = [common_tasks, info.manifest.modules['provider']] + info.manifest.modules['plugins'] + all_tasks = set(get_all_tasks(modules)) + # Create a list for us to run + task_list = create_list(self.tasks, all_tasks) + # Output the tasklist + log.debug('Tasklist:\n\t' + ('\n\t'.join(map(repr, task_list)))) - for task in task_list: - # Tasks are not required to have a description - if hasattr(task, 'description'): - log.info(task.description) - else: - # If there is no description, simply coerce the task into a string and print its name - log.info('Running ' + str(task)) - if not dry_run: - # Run the task - task.run(info) - # Remember which tasks have been run for later use (e.g. when rolling back, because of an error) - self.tasks_completed.append(task) + for task in task_list: + # Tasks are not required to have a description + if hasattr(task, 'description'): + log.info(task.description) + else: + # If there is no description, simply coerce the task into a string and print its name + log.info('Running ' + str(task)) + if not dry_run: + # Run the task + task.run(info) + # Remember which tasks have been run for later use (e.g. when rolling back, because of an error) + self.tasks_completed.append(task) def load_tasks(function, manifest, *args): - """Calls ``function`` on the provider and all plugins that have been loaded by the manifest. - Any additional arguments are passed directly to ``function``. - The function that is called shall accept the taskset as its first argument and the manifest - as its second argument. + """Calls ``function`` on the provider and all plugins that have been loaded by the manifest. + Any additional arguments are passed directly to ``function``. + The function that is called shall accept the taskset as its first argument and the manifest + as its second argument. 
- :param str function: Name of the function to call - :param Manifest manifest: The manifest - :param list args: Additional arguments that should be passed to the function that is called - """ - tasks = set() - # Call 'function' on the provider - getattr(manifest.modules['provider'], function)(tasks, manifest, *args) - for plugin in manifest.modules['plugins']: - # Plugins are not required to have whatever function we call - fn = getattr(plugin, function, None) - if callable(fn): - fn(tasks, manifest, *args) - return tasks + :param str function: Name of the function to call + :param Manifest manifest: The manifest + :param list args: Additional arguments that should be passed to the function that is called + """ + tasks = set() + # Call 'function' on the provider + getattr(manifest.modules['provider'], function)(tasks, manifest, *args) + for plugin in manifest.modules['plugins']: + # Plugins are not required to have whatever function we call + fn = getattr(plugin, function, None) + if callable(fn): + fn(tasks, manifest, *args) + return tasks def create_list(taskset, all_tasks): - """Creates a list of all the tasks that should be run. - """ - from bootstrapvz.common.phases import order - # Make sure all_tasks is a superset of the resolved taskset - if not all_tasks >= taskset: - msg = ('bootstrap-vz generated a list of all available tasks. ' - 'That list is not a superset of the tasks required for bootstrapping. 
' - 'The tasks that were not found are: {tasks} ' - '(This is an error in the code and not the manifest, please report this issue.)' - .format(tasks=', '.join(map(str, taskset - all_tasks))) - ) - raise TaskListError(msg) - # Create a graph over all tasks by creating a map of each tasks successors - graph = {} - for task in all_tasks: - # Do a sanity check first - check_ordering(task) - successors = set() - # Add all successors mentioned in the task - successors.update(task.successors) - # Add all tasks that mention this task as a predecessor - successors.update(filter(lambda succ: task in succ.predecessors, all_tasks)) - # Create a list of phases that succeed the phase of this task - succeeding_phases = order[order.index(task.phase) + 1:] - # Add all tasks that occur in above mentioned succeeding phases - successors.update(filter(lambda succ: succ.phase in succeeding_phases, all_tasks)) - # Map the successors to the task - graph[task] = successors + """Creates a list of all the tasks that should be run. + """ + from bootstrapvz.common.phases import order + # Make sure all_tasks is a superset of the resolved taskset + if not all_tasks >= taskset: + msg = ('bootstrap-vz generated a list of all available tasks. ' + 'That list is not a superset of the tasks required for bootstrapping. 
' + 'The tasks that were not found are: {tasks} ' + '(This is an error in the code and not the manifest, please report this issue.)' + .format(tasks=', '.join(map(str, taskset - all_tasks))) + ) + raise TaskListError(msg) + # Create a graph over all tasks by creating a map of each tasks successors + graph = {} + for task in all_tasks: + # Do a sanity check first + check_ordering(task) + successors = set() + # Add all successors mentioned in the task + successors.update(task.successors) + # Add all tasks that mention this task as a predecessor + successors.update(filter(lambda succ: task in succ.predecessors, all_tasks)) + # Create a list of phases that succeed the phase of this task + succeeding_phases = order[order.index(task.phase) + 1:] + # Add all tasks that occur in above mentioned succeeding phases + successors.update(filter(lambda succ: succ.phase in succeeding_phases, all_tasks)) + # Map the successors to the task + graph[task] = successors - # Use the strongly connected components algorithm to check for cycles in our task graph - components = strongly_connected_components(graph) - cycles_found = 0 - for component in components: - # Node of 1 is also a strongly connected component but hardly a cycle, so we filter them out - if len(component) > 1: - cycles_found += 1 - log.debug('Cycle: {list}\n' + (', '.join(map(repr, component)))) - if cycles_found > 0: - msg = ('{num} cycles were found in the tasklist, ' - 'consult the logfile for more information.'.format(num=cycles_found)) - raise TaskListError(msg) + # Use the strongly connected components algorithm to check for cycles in our task graph + components = strongly_connected_components(graph) + cycles_found = 0 + for component in components: + # Node of 1 is also a strongly connected component but hardly a cycle, so we filter them out + if len(component) > 1: + cycles_found += 1 + log.debug('Cycle: {list}\n' + (', '.join(map(repr, component)))) + if cycles_found > 0: + msg = ('{num} cycles were found in the 
tasklist, ' + 'consult the logfile for more information.'.format(num=cycles_found)) + raise TaskListError(msg) - # Run a topological sort on the graph, returning an ordered list - sorted_tasks = topological_sort(graph) + # Run a topological sort on the graph, returning an ordered list + sorted_tasks = topological_sort(graph) - # Filter out any tasks not in the tasklist - # We want to maintain ordering, so we don't use set intersection - sorted_tasks = filter(lambda task: task in taskset, sorted_tasks) - return sorted_tasks + # Filter out any tasks not in the tasklist + # We want to maintain ordering, so we don't use set intersection + sorted_tasks = filter(lambda task: task in taskset, sorted_tasks) + return sorted_tasks def get_all_tasks(modules): - """Gets a list of all task classes in the package + """Gets a list of all task classes in the package - :return: A list of all tasks in the package - :rtype: list - """ - import os.path - # Get generators that return all classes in a module - generators = [] - for module in modules: - module_path = os.path.dirname(module.__file__) - module_prefix = module.__name__ + '.' - generators.append(get_all_classes(module_path, module_prefix)) - import itertools - classes = itertools.chain(*generators) + :return: A list of all tasks in the package + :rtype: list + """ + import os.path + # Get generators that return all classes in a module + generators = [] + for module in modules: + module_path = os.path.dirname(module.__file__) + module_prefix = module.__name__ + '.' 
+ generators.append(get_all_classes(module_path, module_prefix)) + import itertools + classes = itertools.chain(*generators) - # lambda function to check whether a class is a task (excluding the superclass Task) - def is_task(obj): - from task import Task - return issubclass(obj, Task) and obj is not Task - return filter(is_task, classes) # Only return classes that are tasks + # lambda function to check whether a class is a task (excluding the superclass Task) + def is_task(obj): + from task import Task + return issubclass(obj, Task) and obj is not Task + return filter(is_task, classes) # Only return classes that are tasks def get_all_classes(path=None, prefix='', excludes=[]): - """ Given a path to a package, this function retrieves all the classes in it + """ Given a path to a package, this function retrieves all the classes in it - :param str path: Path to the package - :param str prefix: Name of the package followed by a dot - :param list excludes: List of str matching module names that should be ignored - :return: A generator that yields classes - :rtype: generator - :raises Exception: If a module cannot be inspected. - """ - import pkgutil - import importlib - import inspect + :param str path: Path to the package + :param str prefix: Name of the package followed by a dot + :param list excludes: List of str matching module names that should be ignored + :return: A generator that yields classes + :rtype: generator + :raises Exception: If a module cannot be inspected. 
+ """ + import pkgutil + import importlib + import inspect - def walk_error(module_name): - if not any(map(lambda excl: module_name.startswith(excl), excludes)): - raise TaskListError('Unable to inspect module ' + module_name) - walker = pkgutil.walk_packages([path], prefix, walk_error) - for _, module_name, _ in walker: - if any(map(lambda excl: module_name.startswith(excl), excludes)): - continue - module = importlib.import_module(module_name) - classes = inspect.getmembers(module, inspect.isclass) - for class_name, obj in classes: - # We only want classes that are defined in the module, and not imported ones - if obj.__module__ == module_name: - yield obj + def walk_error(module_name): + if not any(map(lambda excl: module_name.startswith(excl), excludes)): + raise TaskListError('Unable to inspect module ' + module_name) + walker = pkgutil.walk_packages([path], prefix, walk_error) + for _, module_name, _ in walker: + if any(map(lambda excl: module_name.startswith(excl), excludes)): + continue + module = importlib.import_module(module_name) + classes = inspect.getmembers(module, inspect.isclass) + for class_name, obj in classes: + # We only want classes that are defined in the module, and not imported ones + if obj.__module__ == module_name: + yield obj def check_ordering(task): - """Checks the ordering of a task in relation to other tasks and their phases. + """Checks the ordering of a task in relation to other tasks and their phases. - This function checks for a subset of what the strongly connected components algorithm does, - but can deliver a more precise error message, namely that there is a conflict between - what a task has specified as its predecessors or successors and in which phase it is placed. 
+ This function checks for a subset of what the strongly connected components algorithm does, + but can deliver a more precise error message, namely that there is a conflict between + what a task has specified as its predecessors or successors and in which phase it is placed. - :param Task task: The task to check the ordering for - :raises TaskListError: If there is a conflict between task precedence and phase precedence - """ - for successor in task.successors: - # Run through all successors and throw an error if the phase of the task - # lies before the phase of a successor, log a warning if it lies after. - if task.phase > successor.phase: - msg = ("The task {task} is specified as running before {other}, " - "but its phase '{phase}' lies after the phase '{other_phase}'" - .format(task=task, other=successor, phase=task.phase, other_phase=successor.phase)) - raise TaskListError(msg) - if task.phase < successor.phase: - log.warn("The task {task} is specified as running before {other} " - "although its phase '{phase}' already lies before the phase '{other_phase}' " - "(or the task has been placed in the wrong phase)" - .format(task=task, other=successor, phase=task.phase, other_phase=successor.phase)) - for predecessor in task.predecessors: - # Run through all successors and throw an error if the phase of the task - # lies after the phase of a predecessor, log a warning if it lies before. 
- if task.phase < predecessor.phase: - msg = ("The task {task} is specified as running after {other}, " - "but its phase '{phase}' lies before the phase '{other_phase}'" - .format(task=task, other=predecessor, phase=task.phase, other_phase=predecessor.phase)) - raise TaskListError(msg) - if task.phase > predecessor.phase: - log.warn("The task {task} is specified as running after {other} " - "although its phase '{phase}' already lies after the phase '{other_phase}' " - "(or the task has been placed in the wrong phase)" - .format(task=task, other=predecessor, phase=task.phase, other_phase=predecessor.phase)) + :param Task task: The task to check the ordering for + :raises TaskListError: If there is a conflict between task precedence and phase precedence + """ + for successor in task.successors: + # Run through all successors and throw an error if the phase of the task + # lies before the phase of a successor, log a warning if it lies after. + if task.phase > successor.phase: + msg = ("The task {task} is specified as running before {other}, " + "but its phase '{phase}' lies after the phase '{other_phase}'" + .format(task=task, other=successor, phase=task.phase, other_phase=successor.phase)) + raise TaskListError(msg) + if task.phase < successor.phase: + log.warn("The task {task} is specified as running before {other} " + "although its phase '{phase}' already lies before the phase '{other_phase}' " + "(or the task has been placed in the wrong phase)" + .format(task=task, other=successor, phase=task.phase, other_phase=successor.phase)) + for predecessor in task.predecessors: + # Run through all successors and throw an error if the phase of the task + # lies after the phase of a predecessor, log a warning if it lies before. 
+ if task.phase < predecessor.phase: + msg = ("The task {task} is specified as running after {other}, " + "but its phase '{phase}' lies before the phase '{other_phase}'" + .format(task=task, other=predecessor, phase=task.phase, other_phase=predecessor.phase)) + raise TaskListError(msg) + if task.phase > predecessor.phase: + log.warn("The task {task} is specified as running after {other} " + "although its phase '{phase}' already lies after the phase '{other_phase}' " + "(or the task has been placed in the wrong phase)" + .format(task=task, other=predecessor, phase=task.phase, other_phase=predecessor.phase)) def strongly_connected_components(graph): - """Find the strongly connected components in a graph using Tarjan's algorithm. + """Find the strongly connected components in a graph using Tarjan's algorithm. - Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py + Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py - :param dict graph: mapping of tasks to lists of successor tasks - :return: List of tuples that are strongly connected comoponents - :rtype: list - """ + :param dict graph: mapping of tasks to lists of successor tasks + :return: List of tuples that are strongly connected comoponents + :rtype: list + """ - result = [] - stack = [] - low = {} + result = [] + stack = [] + low = {} - def visit(node): - if node in low: - return + def visit(node): + if node in low: + return - num = len(low) - low[node] = num - stack_pos = len(stack) - stack.append(node) + num = len(low) + low[node] = num + stack_pos = len(stack) + stack.append(node) - for successor in graph[node]: - visit(successor) - low[node] = min(low[node], low[successor]) + for successor in graph[node]: + visit(successor) + low[node] = min(low[node], low[successor]) - if num == low[node]: - component = tuple(stack[stack_pos:]) - del stack[stack_pos:] - result.append(component) - for item in component: - low[item] = len(graph) + if num == low[node]: + component = 
tuple(stack[stack_pos:]) + del stack[stack_pos:] + result.append(component) + for item in component: + low[item] = len(graph) - for node in graph: - visit(node) + for node in graph: + visit(node) - return result + return result def topological_sort(graph): - """Runs a topological sort on a graph. + """Runs a topological sort on a graph. - Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py + Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py - :param dict graph: mapping of tasks to lists of successor tasks - :return: A list of all tasks in the graph sorted according to ther dependencies - :rtype: list - """ - count = {} - for node in graph: - count[node] = 0 - for node in graph: - for successor in graph[node]: - count[successor] += 1 + :param dict graph: mapping of tasks to lists of successor tasks + :return: A list of all tasks in the graph sorted according to ther dependencies + :rtype: list + """ + count = {} + for node in graph: + count[node] = 0 + for node in graph: + for successor in graph[node]: + count[successor] += 1 - ready = [node for node in graph if count[node] == 0] + ready = [node for node in graph if count[node] == 0] - result = [] - while ready: - node = ready.pop(-1) - result.append(node) + result = [] + while ready: + node = ready.pop(-1) + result.append(node) - for successor in graph[node]: - count[successor] -= 1 - if count[successor] == 0: - ready.append(successor) + for successor in graph[node]: + count[successor] -= 1 + if count[successor] == 0: + ready.append(successor) - return result + return result diff --git a/bootstrapvz/common/bytes.py b/bootstrapvz/common/bytes.py index f1e48ba..7fab450 100644 --- a/bootstrapvz/common/bytes.py +++ b/bootstrapvz/common/bytes.py @@ -2,158 +2,158 @@ from exceptions import UnitError def onlybytes(msg): - def decorator(func): - def check_other(self, other): - if not isinstance(other, Bytes): - raise UnitError(msg) - return func(self, other) - return check_other - 
return decorator + def decorator(func): + def check_other(self, other): + if not isinstance(other, Bytes): + raise UnitError(msg) + return func(self, other) + return check_other + return decorator class Bytes(object): - units = {'B': 1, - 'KiB': 1024, - 'MiB': 1024 * 1024, - 'GiB': 1024 * 1024 * 1024, - 'TiB': 1024 * 1024 * 1024 * 1024, - } + units = {'B': 1, + 'KiB': 1024, + 'MiB': 1024 * 1024, + 'GiB': 1024 * 1024 * 1024, + 'TiB': 1024 * 1024 * 1024 * 1024, + } - def __init__(self, qty): - if isinstance(qty, (int, long)): - self.qty = qty - else: - self.qty = Bytes.parse(qty) + def __init__(self, qty): + if isinstance(qty, (int, long)): + self.qty = qty + else: + self.qty = Bytes.parse(qty) - @staticmethod - def parse(qty_str): - import re - regex = re.compile('^(?P<qty>\d+)(?P<unit>[KMGT]i?B|B)$') - parsed = regex.match(qty_str) - if parsed is None: - raise UnitError('Unable to parse ' + qty_str) + @staticmethod + def parse(qty_str): + import re + regex = re.compile('^(?P<qty>\d+)(?P<unit>[KMGT]i?B|B)$') + parsed = regex.match(qty_str) + if parsed is None: + raise UnitError('Unable to parse ' + qty_str) - qty = int(parsed.group('qty')) - unit = parsed.group('unit') - if unit[0] in 'KMGT': - unit = unit[0] + 'iB' - byte_qty = qty * Bytes.units[unit] - return byte_qty + qty = int(parsed.group('qty')) + unit = parsed.group('unit') + if unit[0] in 'KMGT': + unit = unit[0] + 'iB' + byte_qty = qty * Bytes.units[unit] + return byte_qty - def get_qty_in(self, unit): - if unit[0] in 'KMGT': - unit = unit[0] + 'iB' - if unit not in Bytes.units: - raise UnitError('Unrecognized unit: ' + unit) - if self.qty % Bytes.units[unit] != 0: - msg = 'Unable to convert {qty} bytes to a whole number in {unit}'.format(qty=self.qty, unit=unit) - raise UnitError(msg) - return self.qty / Bytes.units[unit] + def get_qty_in(self, unit): + if unit[0] in 'KMGT': + unit = unit[0] + 'iB' + if unit not in Bytes.units: + raise UnitError('Unrecognized unit: ' + unit) + if self.qty % Bytes.units[unit] != 0: + msg =
'Unable to convert {qty} bytes to a whole number in {unit}'.format(qty=self.qty, unit=unit) + raise UnitError(msg) + return self.qty / Bytes.units[unit] - def __repr__(self): - converted = str(self.get_qty_in('B')) + 'B' - if self.qty == 0: - return converted - for unit in ['TiB', 'GiB', 'MiB', 'KiB']: - try: - converted = str(self.get_qty_in(unit)) + unit - break - except UnitError: - pass - return converted + def __repr__(self): + converted = str(self.get_qty_in('B')) + 'B' + if self.qty == 0: + return converted + for unit in ['TiB', 'GiB', 'MiB', 'KiB']: + try: + converted = str(self.get_qty_in(unit)) + unit + break + except UnitError: + pass + return converted - def __str__(self): - return self.__repr__() + def __str__(self): + return self.__repr__() - def __int__(self): - return self.qty + def __int__(self): + return self.qty - def __long__(self): - return self.qty + def __long__(self): + return self.qty - @onlybytes('Can only compare Bytes to Bytes') - def __lt__(self, other): - return self.qty < other.qty + @onlybytes('Can only compare Bytes to Bytes') + def __lt__(self, other): + return self.qty < other.qty - @onlybytes('Can only compare Bytes to Bytes') - def __le__(self, other): - return self.qty <= other.qty + @onlybytes('Can only compare Bytes to Bytes') + def __le__(self, other): + return self.qty <= other.qty - @onlybytes('Can only compare Bytes to Bytes') - def __eq__(self, other): - return self.qty == other.qty + @onlybytes('Can only compare Bytes to Bytes') + def __eq__(self, other): + return self.qty == other.qty - @onlybytes('Can only compare Bytes to Bytes') - def __ne__(self, other): - return self.qty != other.qty + @onlybytes('Can only compare Bytes to Bytes') + def __ne__(self, other): + return self.qty != other.qty - @onlybytes('Can only compare Bytes to Bytes') - def __ge__(self, other): - return self.qty >= other.qty + @onlybytes('Can only compare Bytes to Bytes') + def __ge__(self, other): + return self.qty >= other.qty - @onlybytes('Can 
only compare Bytes to Bytes') - def __gt__(self, other): - return self.qty > other.qty + @onlybytes('Can only compare Bytes to Bytes') + def __gt__(self, other): + return self.qty > other.qty - @onlybytes('Can only add Bytes to Bytes') - def __add__(self, other): - return Bytes(self.qty + other.qty) + @onlybytes('Can only add Bytes to Bytes') + def __add__(self, other): + return Bytes(self.qty + other.qty) - @onlybytes('Can only add Bytes to Bytes') - def __iadd__(self, other): - self.qty += other.qty - return self + @onlybytes('Can only add Bytes to Bytes') + def __iadd__(self, other): + self.qty += other.qty + return self - @onlybytes('Can only subtract Bytes from Bytes') - def __sub__(self, other): - return Bytes(self.qty - other.qty) + @onlybytes('Can only subtract Bytes from Bytes') + def __sub__(self, other): + return Bytes(self.qty - other.qty) - @onlybytes('Can only subtract Bytes from Bytes') - def __isub__(self, other): - self.qty -= other.qty - return self + @onlybytes('Can only subtract Bytes from Bytes') + def __isub__(self, other): + self.qty -= other.qty + return self - def __mul__(self, other): - if not isinstance(other, (int, long)): - raise UnitError('Can only multiply Bytes with integers') - return Bytes(self.qty * other) + def __mul__(self, other): + if not isinstance(other, (int, long)): + raise UnitError('Can only multiply Bytes with integers') + return Bytes(self.qty * other) - def __imul__(self, other): - if not isinstance(other, (int, long)): - raise UnitError('Can only multiply Bytes with integers') - self.qty *= other - return self + def __imul__(self, other): + if not isinstance(other, (int, long)): + raise UnitError('Can only multiply Bytes with integers') + self.qty *= other + return self - def __div__(self, other): - if isinstance(other, Bytes): - return self.qty / other.qty - if not isinstance(other, (int, long)): - raise UnitError('Can only divide Bytes with integers or Bytes') - return Bytes(self.qty / other) + def __div__(self, 
other): + if isinstance(other, Bytes): + return self.qty / other.qty + if not isinstance(other, (int, long)): + raise UnitError('Can only divide Bytes with integers or Bytes') + return Bytes(self.qty / other) - def __idiv__(self, other): - if isinstance(other, Bytes): - self.qty /= other.qty - else: - if not isinstance(other, (int, long)): - raise UnitError('Can only divide Bytes with integers or Bytes') - self.qty /= other - return self + def __idiv__(self, other): + if isinstance(other, Bytes): + self.qty /= other.qty + else: + if not isinstance(other, (int, long)): + raise UnitError('Can only divide Bytes with integers or Bytes') + self.qty /= other + return self - @onlybytes('Can only take modulus of Bytes with Bytes') - def __mod__(self, other): - return Bytes(self.qty % other.qty) + @onlybytes('Can only take modulus of Bytes with Bytes') + def __mod__(self, other): + return Bytes(self.qty % other.qty) - @onlybytes('Can only take modulus of Bytes with Bytes') - def __imod__(self, other): - self.qty %= other.qty - return self + @onlybytes('Can only take modulus of Bytes with Bytes') + def __imod__(self, other): + self.qty %= other.qty + return self - def __getstate__(self): - return {'__class__': self.__module__ + '.' + self.__class__.__name__, - 'qty': self.qty, - } + def __getstate__(self): + return {'__class__': self.__module__ + '.' 
+ self.__class__.__name__, + 'qty': self.qty, + } - def __setstate__(self, state): - self.qty = state['qty'] + def __setstate__(self, state): + self.qty = state['qty'] diff --git a/bootstrapvz/common/exceptions.py b/bootstrapvz/common/exceptions.py index 1703143..b9ccbb2 100644 --- a/bootstrapvz/common/exceptions.py +++ b/bootstrapvz/common/exceptions.py @@ -1,38 +1,38 @@ class ManifestError(Exception): - def __init__(self, message, manifest_path=None, data_path=None): - super(ManifestError, self).__init__(message) - self.message = message - self.manifest_path = manifest_path - self.data_path = data_path - self.args = (self.message, self.manifest_path, self.data_path) + def __init__(self, message, manifest_path=None, data_path=None): + super(ManifestError, self).__init__(message) + self.message = message + self.manifest_path = manifest_path + self.data_path = data_path + self.args = (self.message, self.manifest_path, self.data_path) - def __str__(self): - if self.data_path is not None: - path = '.'.join(map(str, self.data_path)) - return ('{msg}\n File path: {file}\n Data path: {datapath}' - .format(msg=self.message, file=self.manifest_path, datapath=path)) - return '{file}: {msg}'.format(msg=self.message, file=self.manifest_path) + def __str__(self): + if self.data_path is not None: + path = '.'.join(map(str, self.data_path)) + return ('{msg}\n File path: {file}\n Data path: {datapath}' + .format(msg=self.message, file=self.manifest_path, datapath=path)) + return '{file}: {msg}'.format(msg=self.message, file=self.manifest_path) class TaskListError(Exception): - def __init__(self, message): - super(TaskListError, self).__init__(message) - self.message = message - self.args = (self.message,) + def __init__(self, message): + super(TaskListError, self).__init__(message) + self.message = message + self.args = (self.message,) - def __str__(self): - return 'Error in tasklist: ' + self.message + def __str__(self): + return 'Error in tasklist: ' + self.message class 
TaskError(Exception): - pass + pass class UnexpectedNumMatchesError(Exception): - pass + pass class UnitError(Exception): - pass + pass diff --git a/bootstrapvz/common/fs/__init__.py b/bootstrapvz/common/fs/__init__.py index c393ea2..300765a 100644 --- a/bootstrapvz/common/fs/__init__.py +++ b/bootstrapvz/common/fs/__init__.py @@ -2,32 +2,32 @@ from contextlib import contextmanager def get_partitions(): - import re - regexp = re.compile('^ *(?P<major>\d+) *(?P<minor>\d+) *(?P<num_blks>\d+) (?P<dev_name>\S+)$') - matches = {} - path = '/proc/partitions' - with open(path) as partitions: - next(partitions) - next(partitions) - for line in partitions: - match = regexp.match(line) - if match is None: - raise RuntimeError('Unable to parse {line} in {path}'.format(line=line, path=path)) - matches[match.group('dev_name')] = match.groupdict() - return matches + import re + regexp = re.compile('^ *(?P<major>\d+) *(?P<minor>\d+) *(?P<num_blks>\d+) (?P<dev_name>\S+)$') + matches = {} + path = '/proc/partitions' + with open(path) as partitions: + next(partitions) + next(partitions) + for line in partitions: + match = regexp.match(line) + if match is None: + raise RuntimeError('Unable to parse {line} in {path}'.format(line=line, path=path)) + matches[match.group('dev_name')] = match.groupdict() + return matches @contextmanager def unmounted(volume): - from bootstrapvz.base.fs.partitionmaps.none import NoPartitions + from bootstrapvz.base.fs.partitionmaps.none import NoPartitions - p_map = volume.partition_map - root_dir = p_map.root.mount_dir - p_map.root.unmount() - if not isinstance(p_map, NoPartitions): - p_map.unmap(volume) - yield - p_map.map(volume) - else: - yield - p_map.root.mount(destination=root_dir) + p_map = volume.partition_map + root_dir = p_map.root.mount_dir + p_map.root.unmount() + if not isinstance(p_map, NoPartitions): + p_map.unmap(volume) + yield + p_map.map(volume) + else: + yield + p_map.root.mount(destination=root_dir) diff --git a/bootstrapvz/common/fs/folder.py b/bootstrapvz/common/fs/folder.py index 6581cee..6517458
100644 --- a/bootstrapvz/common/fs/folder.py +++ b/bootstrapvz/common/fs/folder.py @@ -3,22 +3,22 @@ from bootstrapvz.base.fs.volume import Volume class Folder(Volume): - # Override the states this volume can be in (i.e. we can't "format" or "attach" it) - events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'attached'}, - {'name': 'delete', 'src': 'attached', 'dst': 'deleted'}, - ] + # Override the states this volume can be in (i.e. we can't "format" or "attach" it) + events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'attached'}, + {'name': 'delete', 'src': 'attached', 'dst': 'deleted'}, + ] - extension = 'chroot' + extension = 'chroot' - def create(self, path): - self.fsm.create(path=path) + def create(self, path): + self.fsm.create(path=path) - def _before_create(self, e): - import os - self.path = e.path - os.mkdir(self.path) + def _before_create(self, e): + import os + self.path = e.path + os.mkdir(self.path) - def _before_delete(self, e): - from shutil import rmtree - rmtree(self.path) - del self.path + def _before_delete(self, e): + from shutil import rmtree + rmtree(self.path) + del self.path diff --git a/bootstrapvz/common/fs/loopbackvolume.py b/bootstrapvz/common/fs/loopbackvolume.py index 248f3d7..133b6b8 100644 --- a/bootstrapvz/common/fs/loopbackvolume.py +++ b/bootstrapvz/common/fs/loopbackvolume.py @@ -4,26 +4,26 @@ from ..tools import log_check_call class LoopbackVolume(Volume): - extension = 'raw' + extension = 'raw' - def create(self, image_path): - self.fsm.create(image_path=image_path) + def create(self, image_path): + self.fsm.create(image_path=image_path) - def _before_create(self, e): - self.image_path = e.image_path - size_opt = '--size={mib}M'.format(mib=self.size.bytes.get_qty_in('MiB')) - log_check_call(['truncate', size_opt, self.image_path]) + def _before_create(self, e): + self.image_path = e.image_path + size_opt = '--size={mib}M'.format(mib=self.size.bytes.get_qty_in('MiB')) + log_check_call(['truncate', size_opt, 
self.image_path]) - def _before_attach(self, e): - [self.loop_device_path] = log_check_call(['losetup', '--show', '--find', self.image_path]) - self.device_path = self.loop_device_path + def _before_attach(self, e): + [self.loop_device_path] = log_check_call(['losetup', '--show', '--find', self.image_path]) + self.device_path = self.loop_device_path - def _before_detach(self, e): - log_check_call(['losetup', '--detach', self.loop_device_path]) - del self.loop_device_path - self.device_path = None + def _before_detach(self, e): + log_check_call(['losetup', '--detach', self.loop_device_path]) + del self.loop_device_path + self.device_path = None - def _before_delete(self, e): - from os import remove - remove(self.image_path) - del self.image_path + def _before_delete(self, e): + from os import remove + remove(self.image_path) + del self.image_path diff --git a/bootstrapvz/common/fs/qemuvolume.py b/bootstrapvz/common/fs/qemuvolume.py index 0e8c2f0..e04094f 100644 --- a/bootstrapvz/common/fs/qemuvolume.py +++ b/bootstrapvz/common/fs/qemuvolume.py @@ -6,78 +6,78 @@ from . 
import get_partitions class QEMUVolume(LoopbackVolume): - def _before_create(self, e): - self.image_path = e.image_path - vol_size = str(self.size.bytes.get_qty_in('MiB')) + 'M' - log_check_call(['qemu-img', 'create', '-f', self.qemu_format, self.image_path, vol_size]) + def _before_create(self, e): + self.image_path = e.image_path + vol_size = str(self.size.bytes.get_qty_in('MiB')) + 'M' + log_check_call(['qemu-img', 'create', '-f', self.qemu_format, self.image_path, vol_size]) - def _check_nbd_module(self): - from bootstrapvz.base.fs.partitionmaps.none import NoPartitions - if isinstance(self.partition_map, NoPartitions): - if not self._module_loaded('nbd'): - msg = ('The kernel module `nbd\' must be loaded ' - '(`modprobe nbd\') to attach .{extension} images' - .format(extension=self.extension)) - raise VolumeError(msg) - else: - num_partitions = len(self.partition_map.partitions) - if not self._module_loaded('nbd'): - msg = ('The kernel module `nbd\' must be loaded ' - '(run `modprobe nbd max_part={num_partitions}\') ' - 'to attach .{extension} images' - .format(num_partitions=num_partitions, extension=self.extension)) - raise VolumeError(msg) - nbd_max_part = int(self._module_param('nbd', 'max_part')) - if nbd_max_part < num_partitions: - # Found here: http://bethesignal.org/blog/2011/01/05/how-to-mount-virtualbox-vdi-image/ - msg = ('The kernel module `nbd\' was loaded with the max_part ' - 'parameter set to {max_part}, which is below ' - 'the amount of partitions for this volume ({num_partitions}). ' - 'Reload the nbd kernel module with max_part set to at least {num_partitions} ' - '(`rmmod nbd; modprobe nbd max_part={num_partitions}\').' 
- .format(max_part=nbd_max_part, num_partitions=num_partitions)) - raise VolumeError(msg) + def _check_nbd_module(self): + from bootstrapvz.base.fs.partitionmaps.none import NoPartitions + if isinstance(self.partition_map, NoPartitions): + if not self._module_loaded('nbd'): + msg = ('The kernel module `nbd\' must be loaded ' + '(`modprobe nbd\') to attach .{extension} images' + .format(extension=self.extension)) + raise VolumeError(msg) + else: + num_partitions = len(self.partition_map.partitions) + if not self._module_loaded('nbd'): + msg = ('The kernel module `nbd\' must be loaded ' + '(run `modprobe nbd max_part={num_partitions}\') ' + 'to attach .{extension} images' + .format(num_partitions=num_partitions, extension=self.extension)) + raise VolumeError(msg) + nbd_max_part = int(self._module_param('nbd', 'max_part')) + if nbd_max_part < num_partitions: + # Found here: http://bethesignal.org/blog/2011/01/05/how-to-mount-virtualbox-vdi-image/ + msg = ('The kernel module `nbd\' was loaded with the max_part ' + 'parameter set to {max_part}, which is below ' + 'the amount of partitions for this volume ({num_partitions}). ' + 'Reload the nbd kernel module with max_part set to at least {num_partitions} ' + '(`rmmod nbd; modprobe nbd max_part={num_partitions}\').' 
+ .format(max_part=nbd_max_part, num_partitions=num_partitions)) + raise VolumeError(msg) - def _before_attach(self, e): - self._check_nbd_module() - self.loop_device_path = self._find_free_nbd_device() - log_check_call(['qemu-nbd', '--connect', self.loop_device_path, self.image_path]) - self.device_path = self.loop_device_path + def _before_attach(self, e): + self._check_nbd_module() + self.loop_device_path = self._find_free_nbd_device() + log_check_call(['qemu-nbd', '--connect', self.loop_device_path, self.image_path]) + self.device_path = self.loop_device_path - def _before_detach(self, e): - log_check_call(['qemu-nbd', '--disconnect', self.loop_device_path]) - del self.loop_device_path - self.device_path = None + def _before_detach(self, e): + log_check_call(['qemu-nbd', '--disconnect', self.loop_device_path]) + del self.loop_device_path + self.device_path = None - def _module_loaded(self, module): - import re - regexp = re.compile('^{module} +'.format(module=module)) - with open('/proc/modules') as loaded_modules: - for line in loaded_modules: - match = regexp.match(line) - if match is not None: - return True - return False + def _module_loaded(self, module): + import re + regexp = re.compile('^{module} +'.format(module=module)) + with open('/proc/modules') as loaded_modules: + for line in loaded_modules: + match = regexp.match(line) + if match is not None: + return True + return False - def _module_param(self, module, param): - import os.path - param_path = os.path.join('/sys/module', module, 'parameters', param) - with open(param_path) as param: - return param.read().strip() + def _module_param(self, module, param): + import os.path + param_path = os.path.join('/sys/module', module, 'parameters', param) + with open(param_path) as param: + return param.read().strip() - # From http://lists.gnu.org/archive/html/qemu-devel/2011-11/msg02201.html - # Apparently it's not in the current qemu-nbd shipped with wheezy - def _is_nbd_used(self, device_name): - return 
device_name in get_partitions() + # From http://lists.gnu.org/archive/html/qemu-devel/2011-11/msg02201.html + # Apparently it's not in the current qemu-nbd shipped with wheezy + def _is_nbd_used(self, device_name): + return device_name in get_partitions() - def _find_free_nbd_device(self): - import os.path - for i in xrange(0, 15): - device_name = 'nbd' + str(i) - if not self._is_nbd_used(device_name): - return os.path.join('/dev', device_name) - raise VolumeError('Unable to find free nbd device.') + def _find_free_nbd_device(self): + import os.path + for i in xrange(0, 15): + device_name = 'nbd' + str(i) + if not self._is_nbd_used(device_name): + return os.path.join('/dev', device_name) + raise VolumeError('Unable to find free nbd device.') - def __setstate__(self, state): - for key in state: - self.__dict__[key] = state[key] + def __setstate__(self, state): + for key in state: + self.__dict__[key] = state[key] diff --git a/bootstrapvz/common/fs/virtualdiskimage.py b/bootstrapvz/common/fs/virtualdiskimage.py index 62b3f09..057ccf3 100644 --- a/bootstrapvz/common/fs/virtualdiskimage.py +++ b/bootstrapvz/common/fs/virtualdiskimage.py @@ -3,13 +3,13 @@ from qemuvolume import QEMUVolume class VirtualDiskImage(QEMUVolume): - extension = 'vdi' - qemu_format = 'vdi' - # VDI format does not have an URI (check here: https://forums.virtualbox.org/viewtopic.php?p=275185#p275185) - ovf_uri = None + extension = 'vdi' + qemu_format = 'vdi' + # VDI format does not have an URI (check here: https://forums.virtualbox.org/viewtopic.php?p=275185#p275185) + ovf_uri = None - def get_uuid(self): - import uuid - with open(self.image_path) as image: - image.seek(392) - return uuid.UUID(bytes_le=image.read(16)) + def get_uuid(self): + import uuid + with open(self.image_path) as image: + image.seek(392) + return uuid.UUID(bytes_le=image.read(16)) diff --git a/bootstrapvz/common/fs/virtualharddisk.py b/bootstrapvz/common/fs/virtualharddisk.py index 8a71ed2..617f7a6 100644 --- 
a/bootstrapvz/common/fs/virtualharddisk.py +++ b/bootstrapvz/common/fs/virtualharddisk.py @@ -4,20 +4,20 @@ from ..tools import log_check_call class VirtualHardDisk(QEMUVolume): - extension = 'vhd' - qemu_format = 'vpc' - ovf_uri = 'http://go.microsoft.com/fwlink/?LinkId=137171' + extension = 'vhd' + qemu_format = 'vpc' + ovf_uri = 'http://go.microsoft.com/fwlink/?LinkId=137171' - # Azure requires the image size to be a multiple of 1 MiB. - # VHDs are dynamic by default, so we add the option - # to make the image size fixed (subformat=fixed) - def _before_create(self, e): - self.image_path = e.image_path - vol_size = str(self.size.bytes.get_qty_in('MiB')) + 'M' - log_check_call(['qemu-img', 'create', '-o', 'subformat=fixed', '-f', self.qemu_format, self.image_path, vol_size]) + # Azure requires the image size to be a multiple of 1 MiB. + # VHDs are dynamic by default, so we add the option + # to make the image size fixed (subformat=fixed) + def _before_create(self, e): + self.image_path = e.image_path + vol_size = str(self.size.bytes.get_qty_in('MiB')) + 'M' + log_check_call(['qemu-img', 'create', '-o', 'subformat=fixed', '-f', self.qemu_format, self.image_path, vol_size]) - def get_uuid(self): - if not hasattr(self, 'uuid'): - import uuid - self.uuid = uuid.uuid4() - return self.uuid + def get_uuid(self): + if not hasattr(self, 'uuid'): + import uuid + self.uuid = uuid.uuid4() + return self.uuid diff --git a/bootstrapvz/common/fs/virtualmachinedisk.py b/bootstrapvz/common/fs/virtualmachinedisk.py index 499c84a..73f39be 100644 --- a/bootstrapvz/common/fs/virtualmachinedisk.py +++ b/bootstrapvz/common/fs/virtualmachinedisk.py @@ -3,25 +3,25 @@ from qemuvolume import QEMUVolume class VirtualMachineDisk(QEMUVolume): - extension = 'vmdk' - qemu_format = 'vmdk' - ovf_uri = 'http://www.vmware.com/specifications/vmdk.html#sparse' + extension = 'vmdk' + qemu_format = 'vmdk' + ovf_uri = 'http://www.vmware.com/specifications/vmdk.html#sparse' - def get_uuid(self): - if not 
hasattr(self, 'uuid'): - import uuid - self.uuid = uuid.uuid4() - return self.uuid - # import uuid - # with open(self.image_path) as image: - # line = '' - # lines_read = 0 - # while 'ddb.uuid.image="' not in line: - # line = image.read() - # lines_read += 1 - # if lines_read > 100: - # from common.exceptions import VolumeError - # raise VolumeError('Unable to find UUID in VMDK file.') - # import re - # matches = re.search('ddb.uuid.image="(?P<uuid>[^"]+)"', line) - # return uuid.UUID(hex=matches.group('uuid')) + def get_uuid(self): + if not hasattr(self, 'uuid'): + import uuid + self.uuid = uuid.uuid4() + return self.uuid + # import uuid + # with open(self.image_path) as image: + # line = '' + # lines_read = 0 + # while 'ddb.uuid.image="' not in line: + # line = image.read() + # lines_read += 1 + # if lines_read > 100: + # from common.exceptions import VolumeError + # raise VolumeError('Unable to find UUID in VMDK file.') + # import re + # matches = re.search('ddb.uuid.image="(?P<uuid>[^"]+)"', line) + # return uuid.UUID(hex=matches.group('uuid')) diff --git a/bootstrapvz/common/fsm_proxy.py b/bootstrapvz/common/fsm_proxy.py index a968b92..5db4036 100644 --- a/bootstrapvz/common/fsm_proxy.py +++ b/bootstrapvz/common/fsm_proxy.py @@ -2,60 +2,60 @@ class FSMProxy(object): - def __init__(self, cfg): - from fysom import Fysom - events = set([event['name'] for event in cfg['events']]) - cfg['callbacks'] = self.collect_event_listeners(events, cfg['callbacks']) - self.fsm = Fysom(cfg) - self.attach_proxy_methods(self.fsm, events) + def __init__(self, cfg): + from fysom import Fysom + events = set([event['name'] for event in cfg['events']]) + cfg['callbacks'] = self.collect_event_listeners(events, cfg['callbacks']) + self.fsm = Fysom(cfg) + self.attach_proxy_methods(self.fsm, events) - def collect_event_listeners(self, events, callbacks): - callbacks = callbacks.copy() - callback_names = [] - for event in events: - callback_names.append(('_before_' + event, 'onbefore' + event)) -
callback_names.append(('_after_' + event, 'onafter' + event)) - for fn_name, listener in callback_names: - fn = getattr(self, fn_name, None) - if callable(fn): - if listener in callbacks: - old_fn = callbacks[listener] + def collect_event_listeners(self, events, callbacks): + callbacks = callbacks.copy() + callback_names = [] + for event in events: + callback_names.append(('_before_' + event, 'onbefore' + event)) + callback_names.append(('_after_' + event, 'onafter' + event)) + for fn_name, listener in callback_names: + fn = getattr(self, fn_name, None) + if callable(fn): + if listener in callbacks: + old_fn = callbacks[listener] - def wrapper(e, old_fn=old_fn, fn=fn): - old_fn(e) - fn(e) - callbacks[listener] = wrapper - else: - callbacks[listener] = fn - return callbacks + def wrapper(e, old_fn=old_fn, fn=fn): + old_fn(e) + fn(e) + callbacks[listener] = wrapper + else: + callbacks[listener] = fn + return callbacks - def attach_proxy_methods(self, fsm, events): - def make_proxy(fsm, event): - fn = getattr(fsm, event) + def attach_proxy_methods(self, fsm, events): + def make_proxy(fsm, event): + fn = getattr(fsm, event) - def proxy(*args, **kwargs): - if len(args) > 0: - raise FSMProxyError('FSMProxy event listeners only accept named arguments.') - fn(**kwargs) - return proxy + def proxy(*args, **kwargs): + if len(args) > 0: + raise FSMProxyError('FSMProxy event listeners only accept named arguments.') + fn(**kwargs) + return proxy - for event in events: - if not hasattr(self, event): - setattr(self, event, make_proxy(fsm, event)) + for event in events: + if not hasattr(self, event): + setattr(self, event, make_proxy(fsm, event)) - def __getstate__(self): - state = {} - for key, value in self.__dict__.iteritems(): - if callable(value) or key == 'fsm': - continue - state[key] = value - state['__class__'] = self.__module__ + '.' 
+ self.__class__.__name__ - return state + def __getstate__(self): + state = {} + for key, value in self.__dict__.iteritems(): + if callable(value) or key == 'fsm': + continue + state[key] = value + state['__class__'] = self.__module__ + '.' + self.__class__.__name__ + return state - def __setstate__(self, state): - for key in state: - self.__dict__[key] = state[key] + def __setstate__(self, state): + for key in state: + self.__dict__[key] = state[key] class FSMProxyError(Exception): - pass + pass diff --git a/bootstrapvz/common/releases.py b/bootstrapvz/common/releases.py index 558b4b1..24bf400 100644 --- a/bootstrapvz/common/releases.py +++ b/bootstrapvz/common/releases.py @@ -1,34 +1,34 @@ class _Release(object): - def __init__(self, codename, version): - self.codename = codename - self.version = version + def __init__(self, codename, version): + self.codename = codename + self.version = version - def __cmp__(self, other): - return self.version - other.version + def __cmp__(self, other): + return self.version - other.version - def __str__(self): - return self.codename + def __str__(self): + return self.codename - def __getstate__(self): - state = self.__dict__.copy() - state['__class__'] = self.__module__ + '.' + self.__class__.__name__ - return state + def __getstate__(self): + state = self.__dict__.copy() + state['__class__'] = self.__module__ + '.' 
+ self.__class__.__name__ + return state - def __setstate__(self, state): - for key in state: - self.__dict__[key] = state[key] + def __setstate__(self, state): + for key in state: + self.__dict__[key] = state[key] class _ReleaseAlias(_Release): - def __init__(self, alias, release): - self.alias = alias - self.release = release - super(_ReleaseAlias, self).__init__(self.release.codename, self.release.version) + def __init__(self, alias, release): + self.alias = alias + self.release = release + super(_ReleaseAlias, self).__init__(self.release.codename, self.release.version) - def __str__(self): - return self.alias + def __str__(self): + return self.alias sid = _Release('sid', 10) @@ -54,15 +54,15 @@ oldstable = _ReleaseAlias('oldstable', wheezy) def get_release(release_name): - """Normalizes the release codenames - This allows tasks to query for release codenames rather than 'stable', 'unstable' etc. - """ - from . import releases - release = getattr(releases, release_name, None) - if release is None or not isinstance(release, _Release): - raise UnknownReleaseException('The release `{name}\' is unknown'.format(name=release)) - return release + """Normalizes the release codenames + This allows tasks to query for release codenames rather than 'stable', 'unstable' etc. + """ + from . 
import releases + release = getattr(releases, release_name, None) + if release is None or not isinstance(release, _Release): + raise UnknownReleaseException('The release `{name}\' is unknown'.format(name=release)) + return release class UnknownReleaseException(Exception): - pass + pass diff --git a/bootstrapvz/common/sectors.py b/bootstrapvz/common/sectors.py index d658140..37a828e 100644 --- a/bootstrapvz/common/sectors.py +++ b/bootstrapvz/common/sectors.py @@ -3,176 +3,176 @@ from bytes import Bytes def onlysectors(msg): - def decorator(func): - def check_other(self, other): - if not isinstance(other, Sectors): - raise UnitError(msg) - return func(self, other) - return check_other - return decorator + def decorator(func): + def check_other(self, other): + if not isinstance(other, Sectors): + raise UnitError(msg) + return func(self, other) + return check_other + return decorator class Sectors(object): - def __init__(self, quantity, sector_size): - if isinstance(sector_size, Bytes): - self.sector_size = sector_size - else: - self.sector_size = Bytes(sector_size) + def __init__(self, quantity, sector_size): + if isinstance(sector_size, Bytes): + self.sector_size = sector_size + else: + self.sector_size = Bytes(sector_size) - if isinstance(quantity, Bytes): - self.bytes = quantity - else: - if isinstance(quantity, (int, long)): - self.bytes = self.sector_size * quantity - else: - self.bytes = Bytes(quantity) + if isinstance(quantity, Bytes): + self.bytes = quantity + else: + if isinstance(quantity, (int, long)): + self.bytes = self.sector_size * quantity + else: + self.bytes = Bytes(quantity) - def get_sectors(self): - return self.bytes / self.sector_size + def get_sectors(self): + return self.bytes / self.sector_size - def __repr__(self): - return str(self.get_sectors()) + 's' + def __repr__(self): + return str(self.get_sectors()) + 's' - def __str__(self): - return self.__repr__() + def __str__(self): + return self.__repr__() - def __int__(self): - return 
self.get_sectors() + def __int__(self): + return self.get_sectors() - def __long__(self): - return self.get_sectors() + def __long__(self): + return self.get_sectors() - @onlysectors('Can only compare sectors with sectors') - def __lt__(self, other): - return self.bytes < other.bytes + @onlysectors('Can only compare sectors with sectors') + def __lt__(self, other): + return self.bytes < other.bytes - @onlysectors('Can only compare sectors with sectors') - def __le__(self, other): - return self.bytes <= other.bytes + @onlysectors('Can only compare sectors with sectors') + def __le__(self, other): + return self.bytes <= other.bytes - @onlysectors('Can only compare sectors with sectors') - def __eq__(self, other): - return self.bytes == other.bytes + @onlysectors('Can only compare sectors with sectors') + def __eq__(self, other): + return self.bytes == other.bytes - @onlysectors('Can only compare sectors with sectors') - def __ne__(self, other): - return self.bytes != other.bytes + @onlysectors('Can only compare sectors with sectors') + def __ne__(self, other): + return self.bytes != other.bytes - @onlysectors('Can only compare sectors with sectors') - def __ge__(self, other): - return self.bytes >= other.bytes + @onlysectors('Can only compare sectors with sectors') + def __ge__(self, other): + return self.bytes >= other.bytes - @onlysectors('Can only compare sectors with sectors') - def __gt__(self, other): - return self.bytes > other.bytes + @onlysectors('Can only compare sectors with sectors') + def __gt__(self, other): + return self.bytes > other.bytes - def __add__(self, other): - if isinstance(other, (int, long)): - return Sectors(self.bytes + self.sector_size * other, self.sector_size) - if isinstance(other, Bytes): - return Sectors(self.bytes + other, self.sector_size) - if isinstance(other, Sectors): - if self.sector_size != other.sector_size: - raise UnitError('Cannot sum sectors with different sector sizes') - return Sectors(self.bytes + other.bytes, 
self.sector_size) - raise UnitError('Can only add sectors, bytes or integers to sectors') + def __add__(self, other): + if isinstance(other, (int, long)): + return Sectors(self.bytes + self.sector_size * other, self.sector_size) + if isinstance(other, Bytes): + return Sectors(self.bytes + other, self.sector_size) + if isinstance(other, Sectors): + if self.sector_size != other.sector_size: + raise UnitError('Cannot sum sectors with different sector sizes') + return Sectors(self.bytes + other.bytes, self.sector_size) + raise UnitError('Can only add sectors, bytes or integers to sectors') - def __iadd__(self, other): - if isinstance(other, (int, long)): - self.bytes += self.sector_size * other - return self - if isinstance(other, Bytes): - self.bytes += other - return self - if isinstance(other, Sectors): - if self.sector_size != other.sector_size: - raise UnitError('Cannot sum sectors with different sector sizes') - self.bytes += other.bytes - return self - raise UnitError('Can only add sectors, bytes or integers to sectors') + def __iadd__(self, other): + if isinstance(other, (int, long)): + self.bytes += self.sector_size * other + return self + if isinstance(other, Bytes): + self.bytes += other + return self + if isinstance(other, Sectors): + if self.sector_size != other.sector_size: + raise UnitError('Cannot sum sectors with different sector sizes') + self.bytes += other.bytes + return self + raise UnitError('Can only add sectors, bytes or integers to sectors') - def __sub__(self, other): - if isinstance(other, (int, long)): - return Sectors(self.bytes - self.sector_size * other, self.sector_size) - if isinstance(other, Bytes): - return Sectors(self.bytes - other, self.sector_size) - if isinstance(other, Sectors): - if self.sector_size != other.sector_size: - raise UnitError('Cannot subtract sectors with different sector sizes') - return Sectors(self.bytes - other.bytes, self.sector_size) - raise UnitError('Can only subtract sectors, bytes or integers from 
sectors') + def __sub__(self, other): + if isinstance(other, (int, long)): + return Sectors(self.bytes - self.sector_size * other, self.sector_size) + if isinstance(other, Bytes): + return Sectors(self.bytes - other, self.sector_size) + if isinstance(other, Sectors): + if self.sector_size != other.sector_size: + raise UnitError('Cannot subtract sectors with different sector sizes') + return Sectors(self.bytes - other.bytes, self.sector_size) + raise UnitError('Can only subtract sectors, bytes or integers from sectors') - def __isub__(self, other): - if isinstance(other, (int, long)): - self.bytes -= self.sector_size * other - return self - if isinstance(other, Bytes): - self.bytes -= other - return self - if isinstance(other, Sectors): - if self.sector_size != other.sector_size: - raise UnitError('Cannot subtract sectors with different sector sizes') - self.bytes -= other.bytes - return self - raise UnitError('Can only subtract sectors, bytes or integers from sectors') + def __isub__(self, other): + if isinstance(other, (int, long)): + self.bytes -= self.sector_size * other + return self + if isinstance(other, Bytes): + self.bytes -= other + return self + if isinstance(other, Sectors): + if self.sector_size != other.sector_size: + raise UnitError('Cannot subtract sectors with different sector sizes') + self.bytes -= other.bytes + return self + raise UnitError('Can only subtract sectors, bytes or integers from sectors') - def __mul__(self, other): - if isinstance(other, (int, long)): - return Sectors(self.bytes * other, self.sector_size) - else: - raise UnitError('Can only multiply sectors with integers') + def __mul__(self, other): + if isinstance(other, (int, long)): + return Sectors(self.bytes * other, self.sector_size) + else: + raise UnitError('Can only multiply sectors with integers') - def __imul__(self, other): - if isinstance(other, (int, long)): - self.bytes *= other - return self - else: - raise UnitError('Can only multiply sectors with integers') + def 
__imul__(self, other): + if isinstance(other, (int, long)): + self.bytes *= other + return self + else: + raise UnitError('Can only multiply sectors with integers') - def __div__(self, other): - if isinstance(other, (int, long)): - return Sectors(self.bytes / other, self.sector_size) - if isinstance(other, Sectors): - if self.sector_size == other.sector_size: - return self.bytes / other.bytes - else: - raise UnitError('Cannot divide sectors with different sector sizes') - raise UnitError('Can only divide sectors with integers or sectors') + def __div__(self, other): + if isinstance(other, (int, long)): + return Sectors(self.bytes / other, self.sector_size) + if isinstance(other, Sectors): + if self.sector_size == other.sector_size: + return self.bytes / other.bytes + else: + raise UnitError('Cannot divide sectors with different sector sizes') + raise UnitError('Can only divide sectors with integers or sectors') - def __idiv__(self, other): - if isinstance(other, (int, long)): - self.bytes /= other - return self - if isinstance(other, Sectors): - if self.sector_size == other.sector_size: - self.bytes /= other.bytes - return self - else: - raise UnitError('Cannot divide sectors with different sector sizes') - raise UnitError('Can only divide sectors with integers or sectors') + def __idiv__(self, other): + if isinstance(other, (int, long)): + self.bytes /= other + return self + if isinstance(other, Sectors): + if self.sector_size == other.sector_size: + self.bytes /= other.bytes + return self + else: + raise UnitError('Cannot divide sectors with different sector sizes') + raise UnitError('Can only divide sectors with integers or sectors') - @onlysectors('Can only take modulus of sectors with sectors') - def __mod__(self, other): - if self.sector_size == other.sector_size: - return Sectors(self.bytes % other.bytes, self.sector_size) - else: - raise UnitError('Cannot take modulus of sectors with different sector sizes') + @onlysectors('Can only take modulus of sectors 
with sectors') + def __mod__(self, other): + if self.sector_size == other.sector_size: + return Sectors(self.bytes % other.bytes, self.sector_size) + else: + raise UnitError('Cannot take modulus of sectors with different sector sizes') - @onlysectors('Can only take modulus of sectors with sectors') - def __imod__(self, other): - if self.sector_size == other.sector_size: - self.bytes %= other.bytes - return self - else: - raise UnitError('Cannot take modulus of sectors with different sector sizes') + @onlysectors('Can only take modulus of sectors with sectors') + def __imod__(self, other): + if self.sector_size == other.sector_size: + self.bytes %= other.bytes + return self + else: + raise UnitError('Cannot take modulus of sectors with different sector sizes') - def __getstate__(self): - return {'__class__': self.__module__ + '.' + self.__class__.__name__, - 'sector_size': self.sector_size, - 'bytes': self.bytes, - } + def __getstate__(self): + return {'__class__': self.__module__ + '.' + self.__class__.__name__, + 'sector_size': self.sector_size, + 'bytes': self.bytes, + } - def __setstate__(self, state): - self.sector_size = state['sector_size'] - self.bytes = state['bytes'] + def __setstate__(self, state): + self.sector_size = state['sector_size'] + self.bytes = state['bytes'] diff --git a/bootstrapvz/common/task_groups.py b/bootstrapvz/common/task_groups.py index 6320d22..d47e197 100644 --- a/bootstrapvz/common/task_groups.py +++ b/bootstrapvz/common/task_groups.py @@ -20,39 +20,39 @@ from tasks import folder def get_standard_groups(manifest): - group = [] - group.extend(get_base_group(manifest)) - group.extend(volume_group) - if manifest.volume['partitions']['type'] != 'none': - group.extend(partitioning_group) - if 'boot' in manifest.volume['partitions']: - group.extend(boot_partition_group) - group.extend(mounting_group) - group.extend(kernel_group) - group.extend(get_fs_specific_group(manifest)) - group.extend(get_network_group(manifest)) - 
group.extend(get_apt_group(manifest)) - group.extend(security_group) - group.extend(get_locale_group(manifest)) - group.extend(get_bootloader_group(manifest)) - group.extend(cleanup_group) - return group + group = [] + group.extend(get_base_group(manifest)) + group.extend(volume_group) + if manifest.volume['partitions']['type'] != 'none': + group.extend(partitioning_group) + if 'boot' in manifest.volume['partitions']: + group.extend(boot_partition_group) + group.extend(mounting_group) + group.extend(kernel_group) + group.extend(get_fs_specific_group(manifest)) + group.extend(get_network_group(manifest)) + group.extend(get_apt_group(manifest)) + group.extend(security_group) + group.extend(get_locale_group(manifest)) + group.extend(get_bootloader_group(manifest)) + group.extend(cleanup_group) + return group def get_base_group(manifest): - group = [workspace.CreateWorkspace, - bootstrap.AddRequiredCommands, - host.CheckExternalCommands, - bootstrap.Bootstrap, - workspace.DeleteWorkspace, - ] - if manifest.bootstrapper.get('tarball', False): - group.append(bootstrap.MakeTarball) - if manifest.bootstrapper.get('include_packages', False): - group.append(bootstrap.IncludePackagesInBootstrap) - if manifest.bootstrapper.get('exclude_packages', False): - group.append(bootstrap.ExcludePackagesInBootstrap) - return group + group = [workspace.CreateWorkspace, + bootstrap.AddRequiredCommands, + host.CheckExternalCommands, + bootstrap.Bootstrap, + workspace.DeleteWorkspace, + ] + if manifest.bootstrapper.get('tarball', False): + group.append(bootstrap.MakeTarball) + if manifest.bootstrapper.get('include_packages', False): + group.append(bootstrap.IncludePackagesInBootstrap) + if manifest.bootstrapper.get('exclude_packages', False): + group.append(bootstrap.ExcludePackagesInBootstrap) + return group volume_group = [volume.Attach, @@ -95,95 +95,95 @@ ssh_group = [ssh.AddOpenSSHPackage, def get_network_group(manifest): - if manifest.bootstrapper.get('variant', None) == 'minbase': - 
# minbase has no networking - return [] - group = [network.ConfigureNetworkIF, - network.RemoveDNSInfo] - if manifest.system.get('hostname', False): - group.append(network.SetHostname) - else: - group.append(network.RemoveHostname) - return group + if manifest.bootstrapper.get('variant', None) == 'minbase': + # minbase has no networking + return [] + group = [network.ConfigureNetworkIF, + network.RemoveDNSInfo] + if manifest.system.get('hostname', False): + group.append(network.SetHostname) + else: + group.append(network.RemoveHostname) + return group def get_apt_group(manifest): - group = [apt.AddDefaultSources, - apt.WriteSources, - apt.DisableDaemonAutostart, - apt.AptUpdate, - apt.AptUpgrade, - packages.InstallPackages, - apt.PurgeUnusedPackages, - apt.AptClean, - apt.EnableDaemonAutostart, - ] - if 'sources' in manifest.packages: - group.append(apt.AddManifestSources) - if 'trusted-keys' in manifest.packages: - group.append(apt.InstallTrustedKeys) - if 'preferences' in manifest.packages: - group.append(apt.AddManifestPreferences) - group.append(apt.WritePreferences) - if 'apt.conf.d' in manifest.packages: - group.append(apt.WriteConfiguration) - if 'install' in manifest.packages: - group.append(packages.AddManifestPackages) - if manifest.packages.get('install_standard', False): - group.append(packages.AddTaskselStandardPackages) - return group + group = [apt.AddDefaultSources, + apt.WriteSources, + apt.DisableDaemonAutostart, + apt.AptUpdate, + apt.AptUpgrade, + packages.InstallPackages, + apt.PurgeUnusedPackages, + apt.AptClean, + apt.EnableDaemonAutostart, + ] + if 'sources' in manifest.packages: + group.append(apt.AddManifestSources) + if 'trusted-keys' in manifest.packages: + group.append(apt.InstallTrustedKeys) + if 'preferences' in manifest.packages: + group.append(apt.AddManifestPreferences) + group.append(apt.WritePreferences) + if 'apt.conf.d' in manifest.packages: + group.append(apt.WriteConfiguration) + if 'install' in manifest.packages: + 
group.append(packages.AddManifestPackages) + if manifest.packages.get('install_standard', False): + group.append(packages.AddTaskselStandardPackages) + return group security_group = [security.EnableShadowConfig] def get_locale_group(manifest): - from bootstrapvz.common.releases import jessie - group = [ - locale.LocaleBootstrapPackage, - locale.GenerateLocale, - locale.SetTimezone, - ] - if manifest.release > jessie: - group.append(locale.SetLocalTimeLink) - else: - group.append(locale.SetLocalTimeCopy) - return group + from bootstrapvz.common.releases import jessie + group = [ + locale.LocaleBootstrapPackage, + locale.GenerateLocale, + locale.SetTimezone, + ] + if manifest.release > jessie: + group.append(locale.SetLocalTimeLink) + else: + group.append(locale.SetLocalTimeCopy) + return group def get_bootloader_group(manifest): - from bootstrapvz.common.releases import jessie - group = [] - if manifest.system['bootloader'] == 'grub': - group.extend([grub.AddGrubPackage, - grub.ConfigureGrub]) - if manifest.release < jessie: - group.append(grub.InstallGrub_1_99) - else: - group.append(grub.InstallGrub_2) - if manifest.system['bootloader'] == 'extlinux': - group.append(extlinux.AddExtlinuxPackage) - if manifest.release < jessie: - group.extend([extlinux.ConfigureExtlinux, - extlinux.InstallExtlinux]) - else: - group.extend([extlinux.ConfigureExtlinuxJessie, - extlinux.InstallExtlinuxJessie]) - return group + from bootstrapvz.common.releases import jessie + group = [] + if manifest.system['bootloader'] == 'grub': + group.extend([grub.AddGrubPackage, + grub.ConfigureGrub]) + if manifest.release < jessie: + group.append(grub.InstallGrub_1_99) + else: + group.append(grub.InstallGrub_2) + if manifest.system['bootloader'] == 'extlinux': + group.append(extlinux.AddExtlinuxPackage) + if manifest.release < jessie: + group.extend([extlinux.ConfigureExtlinux, + extlinux.InstallExtlinux]) + else: + group.extend([extlinux.ConfigureExtlinuxJessie, + 
extlinux.InstallExtlinuxJessie]) + return group def get_fs_specific_group(manifest): - partitions = manifest.volume['partitions'] - fs_specific_tasks = {'ext2': [filesystem.TuneVolumeFS], - 'ext3': [filesystem.TuneVolumeFS], - 'ext4': [filesystem.TuneVolumeFS], - 'xfs': [filesystem.AddXFSProgs], - } - group = set() - if 'boot' in partitions: - group.update(fs_specific_tasks.get(partitions['boot']['filesystem'], [])) - if 'root' in partitions: - group.update(fs_specific_tasks.get(partitions['root']['filesystem'], [])) - return list(group) + partitions = manifest.volume['partitions'] + fs_specific_tasks = {'ext2': [filesystem.TuneVolumeFS], + 'ext3': [filesystem.TuneVolumeFS], + 'ext4': [filesystem.TuneVolumeFS], + 'xfs': [filesystem.AddXFSProgs], + } + group = set() + if 'boot' in partitions: + group.update(fs_specific_tasks.get(partitions['boot']['filesystem'], [])) + if 'root' in partitions: + group.update(fs_specific_tasks.get(partitions['root']['filesystem'], [])) + return list(group) cleanup_group = [cleanup.ClearMOTD, @@ -202,11 +202,11 @@ rollback_map = {workspace.CreateWorkspace: workspace.DeleteWorkspace, def get_standard_rollback_tasks(completed): - rollback_tasks = set() - for task in completed: - if task not in rollback_map: - continue - counter = rollback_map[task] - if task in completed and counter not in completed: - rollback_tasks.add(counter) - return rollback_tasks + rollback_tasks = set() + for task in completed: + if task not in rollback_map: + continue + counter = rollback_map[task] + if task in completed and counter not in completed: + rollback_tasks.add(counter) + return rollback_tasks diff --git a/bootstrapvz/common/tasks/boot.py b/bootstrapvz/common/tasks/boot.py index afa257f..6489e35 100644 --- a/bootstrapvz/common/tasks/boot.py +++ b/bootstrapvz/common/tasks/boot.py @@ -5,48 +5,48 @@ from . 
import assets class UpdateInitramfs(Task): - description = 'Updating initramfs' - phase = phases.system_modification + description = 'Updating initramfs' + phase = phases.system_modification - @classmethod - def run(cls, info): - from ..tools import log_check_call - log_check_call(['chroot', info.root, 'update-initramfs', '-u']) + @classmethod + def run(cls, info): + from ..tools import log_check_call + log_check_call(['chroot', info.root, 'update-initramfs', '-u']) class BlackListModules(Task): - description = 'Blacklisting kernel modules' - phase = phases.system_modification - successors = [UpdateInitramfs] + description = 'Blacklisting kernel modules' + phase = phases.system_modification + successors = [UpdateInitramfs] - @classmethod - def run(cls, info): - blacklist_path = os.path.join(info.root, 'etc/modprobe.d/blacklist.conf') - with open(blacklist_path, 'a') as blacklist: - blacklist.write(('# disable pc speaker and floppy\n' - 'blacklist pcspkr\n' - 'blacklist floppy\n')) + @classmethod + def run(cls, info): + blacklist_path = os.path.join(info.root, 'etc/modprobe.d/blacklist.conf') + with open(blacklist_path, 'a') as blacklist: + blacklist.write(('# disable pc speaker and floppy\n' + 'blacklist pcspkr\n' + 'blacklist floppy\n')) class DisableGetTTYs(Task): - description = 'Disabling getty processes' - phase = phases.system_modification + description = 'Disabling getty processes' + phase = phases.system_modification - @classmethod - def run(cls, info): - # Forward compatible check for jessie - from bootstrapvz.common.releases import jessie - if info.manifest.release < jessie: - from ..tools import sed_i - inittab_path = os.path.join(info.root, 'etc/inittab') - tty1 = '1:2345:respawn:/sbin/getty 38400 tty1' - sed_i(inittab_path, '^' + tty1, '#' + tty1) - ttyx = ':23:respawn:/sbin/getty 38400 tty' - for i in range(2, 7): - i = str(i) - sed_i(inittab_path, '^' + i + ttyx + i, '#' + i + ttyx + i) - else: - from shutil import copy - logind_asset_path = 
os.path.join(assets, 'systemd/logind.conf') - logind_destination = os.path.join(info.root, 'etc/systemd/logind.conf') - copy(logind_asset_path, logind_destination) + @classmethod + def run(cls, info): + # Forward compatible check for jessie + from bootstrapvz.common.releases import jessie + if info.manifest.release < jessie: + from ..tools import sed_i + inittab_path = os.path.join(info.root, 'etc/inittab') + tty1 = '1:2345:respawn:/sbin/getty 38400 tty1' + sed_i(inittab_path, '^' + tty1, '#' + tty1) + ttyx = ':23:respawn:/sbin/getty 38400 tty' + for i in range(2, 7): + i = str(i) + sed_i(inittab_path, '^' + i + ttyx + i, '#' + i + ttyx + i) + else: + from shutil import copy + logind_asset_path = os.path.join(assets, 'systemd/logind.conf') + logind_destination = os.path.join(info.root, 'etc/systemd/logind.conf') + copy(logind_asset_path, logind_destination) diff --git a/bootstrapvz/common/tasks/bootstrap.py b/bootstrapvz/common/tasks/bootstrap.py index ceceb37..037bf80 100644 --- a/bootstrapvz/common/tasks/bootstrap.py +++ b/bootstrapvz/common/tasks/bootstrap.py @@ -8,107 +8,107 @@ log = logging.getLogger(__name__) class AddRequiredCommands(Task): - description = 'Adding commands required for bootstrapping Debian' - phase = phases.preparation - successors = [host.CheckExternalCommands] + description = 'Adding commands required for bootstrapping Debian' + phase = phases.preparation + successors = [host.CheckExternalCommands] - @classmethod - def run(cls, info): - info.host_dependencies['debootstrap'] = 'debootstrap' + @classmethod + def run(cls, info): + info.host_dependencies['debootstrap'] = 'debootstrap' def get_bootstrap_args(info): - executable = ['debootstrap'] - arch = info.manifest.system.get('userspace_architecture', info.manifest.system.get('architecture')) - options = ['--arch=' + arch] - if 'variant' in info.manifest.bootstrapper: - options.append('--variant=' + info.manifest.bootstrapper['variant']) - if len(info.include_packages) > 0: - 
options.append('--include=' + ','.join(info.include_packages)) - if len(info.exclude_packages) > 0: - options.append('--exclude=' + ','.join(info.exclude_packages)) - mirror = info.manifest.bootstrapper.get('mirror', info.apt_mirror) - arguments = [info.manifest.system['release'], info.root, mirror] - return executable, options, arguments + executable = ['debootstrap'] + arch = info.manifest.system.get('userspace_architecture', info.manifest.system.get('architecture')) + options = ['--arch=' + arch] + if 'variant' in info.manifest.bootstrapper: + options.append('--variant=' + info.manifest.bootstrapper['variant']) + if len(info.include_packages) > 0: + options.append('--include=' + ','.join(info.include_packages)) + if len(info.exclude_packages) > 0: + options.append('--exclude=' + ','.join(info.exclude_packages)) + mirror = info.manifest.bootstrapper.get('mirror', info.apt_mirror) + arguments = [info.manifest.system['release'], info.root, mirror] + return executable, options, arguments def get_tarball_filename(info): - from hashlib import sha1 - executable, options, arguments = get_bootstrap_args(info) - # Filter info.root which points at /target/volume-id, we won't ever hit anything with that in there. - hash_args = [arg for arg in arguments if arg != info.root] - tarball_id = sha1(repr(frozenset(options + hash_args))).hexdigest()[0:8] - tarball_filename = 'debootstrap-' + tarball_id + '.tar' - return os.path.join(info.manifest.bootstrapper['workspace'], tarball_filename) + from hashlib import sha1 + executable, options, arguments = get_bootstrap_args(info) + # Filter info.root which points at /target/volume-id, we won't ever hit anything with that in there. 
+ hash_args = [arg for arg in arguments if arg != info.root] + tarball_id = sha1(repr(frozenset(options + hash_args))).hexdigest()[0:8] + tarball_filename = 'debootstrap-' + tarball_id + '.tar' + return os.path.join(info.manifest.bootstrapper['workspace'], tarball_filename) class MakeTarball(Task): - description = 'Creating bootstrap tarball' - phase = phases.os_installation + description = 'Creating bootstrap tarball' + phase = phases.os_installation - @classmethod - def run(cls, info): - executable, options, arguments = get_bootstrap_args(info) - tarball = get_tarball_filename(info) - if os.path.isfile(tarball): - log.debug('Found matching tarball, skipping creation') - else: - from ..tools import log_call - status, out, err = log_call(executable + options + ['--make-tarball=' + tarball] + arguments) - if status not in [0, 1]: # variant=minbase exits with 0 - msg = 'debootstrap exited with status {status}, it should exit with status 0 or 1'.format(status=status) - raise TaskError(msg) + @classmethod + def run(cls, info): + executable, options, arguments = get_bootstrap_args(info) + tarball = get_tarball_filename(info) + if os.path.isfile(tarball): + log.debug('Found matching tarball, skipping creation') + else: + from ..tools import log_call + status, out, err = log_call(executable + options + ['--make-tarball=' + tarball] + arguments) + if status not in [0, 1]: # variant=minbase exits with 0 + msg = 'debootstrap exited with status {status}, it should exit with status 0 or 1'.format(status=status) + raise TaskError(msg) class Bootstrap(Task): - description = 'Installing Debian' - phase = phases.os_installation - predecessors = [MakeTarball] + description = 'Installing Debian' + phase = phases.os_installation + predecessors = [MakeTarball] - @classmethod - def run(cls, info): - executable, options, arguments = get_bootstrap_args(info) - tarball = get_tarball_filename(info) - if os.path.isfile(tarball): - if not info.manifest.bootstrapper.get('tarball', False): - # 
Only shows this message if it hasn't tried to create the tarball - log.debug('Found matching tarball, skipping download') - options.extend(['--unpack-tarball=' + tarball]) + @classmethod + def run(cls, info): + executable, options, arguments = get_bootstrap_args(info) + tarball = get_tarball_filename(info) + if os.path.isfile(tarball): + if not info.manifest.bootstrapper.get('tarball', False): + # Only shows this message if it hasn't tried to create the tarball + log.debug('Found matching tarball, skipping download') + options.extend(['--unpack-tarball=' + tarball]) - if info.bootstrap_script is not None: - # Optional bootstrapping script to modify the bootstrapping process - arguments.append(info.bootstrap_script) + if info.bootstrap_script is not None: + # Optional bootstrapping script to modify the bootstrapping process + arguments.append(info.bootstrap_script) - try: - from ..tools import log_check_call - log_check_call(executable + options + arguments) - except KeyboardInterrupt: - # Sometimes ../root/sys and ../root/proc are still mounted when - # quitting debootstrap prematurely. This break the cleanup process, - # so we unmount manually (ignore the exit code, the dirs may not be mounted). - from ..tools import log_call - log_call(['umount', os.path.join(info.root, 'sys')]) - log_call(['umount', os.path.join(info.root, 'proc')]) - raise + try: + from ..tools import log_check_call + log_check_call(executable + options + arguments) + except KeyboardInterrupt: + # Sometimes ../root/sys and ../root/proc are still mounted when + # quitting debootstrap prematurely. This break the cleanup process, + # so we unmount manually (ignore the exit code, the dirs may not be mounted). 
+ from ..tools import log_call + log_call(['umount', os.path.join(info.root, 'sys')]) + log_call(['umount', os.path.join(info.root, 'proc')]) + raise class IncludePackagesInBootstrap(Task): - description = 'Add packages in the bootstrap phase' - phase = phases.preparation + description = 'Add packages in the bootstrap phase' + phase = phases.preparation - @classmethod - def run(cls, info): - info.include_packages.update( - set(info.manifest.bootstrapper['include_packages']) - ) + @classmethod + def run(cls, info): + info.include_packages.update( + set(info.manifest.bootstrapper['include_packages']) + ) class ExcludePackagesInBootstrap(Task): - description = 'Remove packages from bootstrap phase' - phase = phases.preparation + description = 'Remove packages from bootstrap phase' + phase = phases.preparation - @classmethod - def run(cls, info): - info.exclude_packages.update( - set(info.manifest.bootstrapper['exclude_packages']) - ) + @classmethod + def run(cls, info): + info.exclude_packages.update( + set(info.manifest.bootstrapper['exclude_packages']) + ) diff --git a/bootstrapvz/common/tasks/cleanup.py b/bootstrapvz/common/tasks/cleanup.py index 471c1e8..442f8f8 100644 --- a/bootstrapvz/common/tasks/cleanup.py +++ b/bootstrapvz/common/tasks/cleanup.py @@ -5,28 +5,28 @@ import shutil class ClearMOTD(Task): - description = 'Clearing the MOTD' - phase = phases.system_cleaning + description = 'Clearing the MOTD' + phase = phases.system_cleaning - @classmethod - def run(cls, info): - with open('/var/run/motd', 'w'): - pass + @classmethod + def run(cls, info): + with open('/var/run/motd', 'w'): + pass class CleanTMP(Task): - description = 'Removing temporary files' - phase = phases.system_cleaning + description = 'Removing temporary files' + phase = phases.system_cleaning - @classmethod - def run(cls, info): - tmp = os.path.join(info.root, 'tmp') - for tmp_file in [os.path.join(tmp, f) for f in os.listdir(tmp)]: - if os.path.isfile(tmp_file): - os.remove(tmp_file) - 
else: - shutil.rmtree(tmp_file) + @classmethod + def run(cls, info): + tmp = os.path.join(info.root, 'tmp') + for tmp_file in [os.path.join(tmp, f) for f in os.listdir(tmp)]: + if os.path.isfile(tmp_file): + os.remove(tmp_file) + else: + shutil.rmtree(tmp_file) - log = os.path.join(info.root, 'var/log/') - os.remove(os.path.join(log, 'bootstrap.log')) - os.remove(os.path.join(log, 'dpkg.log')) + log = os.path.join(info.root, 'var/log/') + os.remove(os.path.join(log, 'bootstrap.log')) + os.remove(os.path.join(log, 'dpkg.log')) diff --git a/bootstrapvz/common/tasks/development.py b/bootstrapvz/common/tasks/development.py index 2fe8e18..28e3dfe 100644 --- a/bootstrapvz/common/tasks/development.py +++ b/bootstrapvz/common/tasks/development.py @@ -3,11 +3,11 @@ from .. import phases class TriggerRollback(Task): - phase = phases.cleaning + phase = phases.cleaning - description = 'Triggering a rollback by throwing an exception' + description = 'Triggering a rollback by throwing an exception' - @classmethod - def run(cls, info): - from ..exceptions import TaskError - raise TaskError('Trigger rollback') + @classmethod + def run(cls, info): + from ..exceptions import TaskError + raise TaskError('Trigger rollback') diff --git a/bootstrapvz/common/tasks/extlinux.py b/bootstrapvz/common/tasks/extlinux.py index 25da011..34f8527 100644 --- a/bootstrapvz/common/tasks/extlinux.py +++ b/bootstrapvz/common/tasks/extlinux.py @@ -8,107 +8,107 @@ import os class AddExtlinuxPackage(Task): - description = 'Adding extlinux package' - phase = phases.preparation + description = 'Adding extlinux package' + phase = phases.preparation - @classmethod - def run(cls, info): - info.packages.add('extlinux') - if isinstance(info.volume.partition_map, partitionmaps.gpt.GPTPartitionMap): - info.packages.add('syslinux-common') + @classmethod + def run(cls, info): + info.packages.add('extlinux') + if isinstance(info.volume.partition_map, partitionmaps.gpt.GPTPartitionMap): + 
info.packages.add('syslinux-common') class ConfigureExtlinux(Task): - description = 'Configuring extlinux' - phase = phases.system_modification - predecessors = [filesystem.FStab] + description = 'Configuring extlinux' + phase = phases.system_modification + predecessors = [filesystem.FStab] - @classmethod - def run(cls, info): - from bootstrapvz.common.releases import squeeze - if info.manifest.release == squeeze: - # On squeeze /etc/default/extlinux is generated when running extlinux-update - log_check_call(['chroot', info.root, - 'extlinux-update']) - from bootstrapvz.common.tools import sed_i - extlinux_def = os.path.join(info.root, 'etc/default/extlinux') - sed_i(extlinux_def, r'^EXTLINUX_PARAMETERS="([^"]+)"$', - r'EXTLINUX_PARAMETERS="\1 console=ttyS0"') + @classmethod + def run(cls, info): + from bootstrapvz.common.releases import squeeze + if info.manifest.release == squeeze: + # On squeeze /etc/default/extlinux is generated when running extlinux-update + log_check_call(['chroot', info.root, + 'extlinux-update']) + from bootstrapvz.common.tools import sed_i + extlinux_def = os.path.join(info.root, 'etc/default/extlinux') + sed_i(extlinux_def, r'^EXTLINUX_PARAMETERS="([^"]+)"$', + r'EXTLINUX_PARAMETERS="\1 console=ttyS0"') class InstallExtlinux(Task): - description = 'Installing extlinux' - phase = phases.system_modification - predecessors = [filesystem.FStab, ConfigureExtlinux] + description = 'Installing extlinux' + phase = phases.system_modification + predecessors = [filesystem.FStab, ConfigureExtlinux] - @classmethod - def run(cls, info): - if isinstance(info.volume.partition_map, partitionmaps.gpt.GPTPartitionMap): - bootloader = '/usr/lib/syslinux/gptmbr.bin' - else: - bootloader = '/usr/lib/extlinux/mbr.bin' - log_check_call(['chroot', info.root, - 'dd', 'bs=440', 'count=1', - 'if=' + bootloader, - 'of=' + info.volume.device_path]) - log_check_call(['chroot', info.root, - 'extlinux', - '--install', '/boot/extlinux']) - log_check_call(['chroot', 
info.root, - 'extlinux-update']) + @classmethod + def run(cls, info): + if isinstance(info.volume.partition_map, partitionmaps.gpt.GPTPartitionMap): + bootloader = '/usr/lib/syslinux/gptmbr.bin' + else: + bootloader = '/usr/lib/extlinux/mbr.bin' + log_check_call(['chroot', info.root, + 'dd', 'bs=440', 'count=1', + 'if=' + bootloader, + 'of=' + info.volume.device_path]) + log_check_call(['chroot', info.root, + 'extlinux', + '--install', '/boot/extlinux']) + log_check_call(['chroot', info.root, + 'extlinux-update']) class ConfigureExtlinuxJessie(Task): - description = 'Configuring extlinux' - phase = phases.system_modification + description = 'Configuring extlinux' + phase = phases.system_modification - @classmethod - def run(cls, info): - extlinux_path = os.path.join(info.root, 'boot/extlinux') - os.mkdir(extlinux_path) + @classmethod + def run(cls, info): + extlinux_path = os.path.join(info.root, 'boot/extlinux') + os.mkdir(extlinux_path) - from . import assets - with open(os.path.join(assets, 'extlinux/extlinux.conf')) as template: - extlinux_config_tpl = template.read() + from . 
import assets + with open(os.path.join(assets, 'extlinux/extlinux.conf')) as template: + extlinux_config_tpl = template.read() - config_vars = {'root_uuid': info.volume.partition_map.root.get_uuid(), - 'kernel_version': info.kernel_version} - # Check if / and /boot are on the same partition - # If not, /boot will actually be / when booting - if hasattr(info.volume.partition_map, 'boot'): - config_vars['boot_prefix'] = '' - else: - config_vars['boot_prefix'] = '/boot' + config_vars = {'root_uuid': info.volume.partition_map.root.get_uuid(), + 'kernel_version': info.kernel_version} + # Check if / and /boot are on the same partition + # If not, /boot will actually be / when booting + if hasattr(info.volume.partition_map, 'boot'): + config_vars['boot_prefix'] = '' + else: + config_vars['boot_prefix'] = '/boot' - extlinux_config = extlinux_config_tpl.format(**config_vars) + extlinux_config = extlinux_config_tpl.format(**config_vars) - with open(os.path.join(extlinux_path, 'extlinux.conf'), 'w') as extlinux_conf_handle: - extlinux_conf_handle.write(extlinux_config) + with open(os.path.join(extlinux_path, 'extlinux.conf'), 'w') as extlinux_conf_handle: + extlinux_conf_handle.write(extlinux_config) - # Copy the boot message - from shutil import copy - boot_txt_path = os.path.join(assets, 'extlinux/boot.txt') - copy(boot_txt_path, os.path.join(extlinux_path, 'boot.txt')) + # Copy the boot message + from shutil import copy + boot_txt_path = os.path.join(assets, 'extlinux/boot.txt') + copy(boot_txt_path, os.path.join(extlinux_path, 'boot.txt')) class InstallExtlinuxJessie(Task): - description = 'Installing extlinux' - phase = phases.system_modification - predecessors = [filesystem.FStab, ConfigureExtlinuxJessie] - # Make sure the kernel image is updated after we have installed the bootloader - successors = [kernel.UpdateInitramfs] + description = 'Installing extlinux' + phase = phases.system_modification + predecessors = [filesystem.FStab, ConfigureExtlinuxJessie] + # Make 
sure the kernel image is updated after we have installed the bootloader + successors = [kernel.UpdateInitramfs] - @classmethod - def run(cls, info): - if isinstance(info.volume.partition_map, partitionmaps.gpt.GPTPartitionMap): - # Yeah, somebody saw it fit to uppercase that folder in jessie. Why? BECAUSE - bootloader = '/usr/lib/EXTLINUX/gptmbr.bin' - else: - bootloader = '/usr/lib/EXTLINUX/mbr.bin' - log_check_call(['chroot', info.root, - 'dd', 'bs=440', 'count=1', - 'if=' + bootloader, - 'of=' + info.volume.device_path]) - log_check_call(['chroot', info.root, - 'extlinux', - '--install', '/boot/extlinux']) + @classmethod + def run(cls, info): + if isinstance(info.volume.partition_map, partitionmaps.gpt.GPTPartitionMap): + # Yeah, somebody saw it fit to uppercase that folder in jessie. Why? BECAUSE + bootloader = '/usr/lib/EXTLINUX/gptmbr.bin' + else: + bootloader = '/usr/lib/EXTLINUX/mbr.bin' + log_check_call(['chroot', info.root, + 'dd', 'bs=440', 'count=1', + 'if=' + bootloader, + 'of=' + info.volume.device_path]) + log_check_call(['chroot', info.root, + 'extlinux', + '--install', '/boot/extlinux']) diff --git a/bootstrapvz/common/tasks/filesystem.py b/bootstrapvz/common/tasks/filesystem.py index 9f348e0..7fa03ae 100644 --- a/bootstrapvz/common/tasks/filesystem.py +++ b/bootstrapvz/common/tasks/filesystem.py @@ -7,196 +7,196 @@ import volume class AddRequiredCommands(Task): - description = 'Adding commands required for formatting' - phase = phases.preparation - successors = [host.CheckExternalCommands] + description = 'Adding commands required for formatting' + phase = phases.preparation + successors = [host.CheckExternalCommands] - @classmethod - def run(cls, info): - if 'xfs' in (p.filesystem for p in info.volume.partition_map.partitions): - info.host_dependencies['mkfs.xfs'] = 'xfsprogs' + @classmethod + def run(cls, info): + if 'xfs' in (p.filesystem for p in info.volume.partition_map.partitions): + info.host_dependencies['mkfs.xfs'] = 'xfsprogs' class 
Format(Task): - description = 'Formatting the volume' - phase = phases.volume_preparation + description = 'Formatting the volume' + phase = phases.volume_preparation - @classmethod - def run(cls, info): - from bootstrapvz.base.fs.partitions.unformatted import UnformattedPartition - for partition in info.volume.partition_map.partitions: - if isinstance(partition, UnformattedPartition): - continue - partition.format() + @classmethod + def run(cls, info): + from bootstrapvz.base.fs.partitions.unformatted import UnformattedPartition + for partition in info.volume.partition_map.partitions: + if isinstance(partition, UnformattedPartition): + continue + partition.format() class TuneVolumeFS(Task): - description = 'Tuning the bootstrap volume filesystem' - phase = phases.volume_preparation - predecessors = [Format] + description = 'Tuning the bootstrap volume filesystem' + phase = phases.volume_preparation + predecessors = [Format] - @classmethod - def run(cls, info): - from bootstrapvz.base.fs.partitions.unformatted import UnformattedPartition - import re - # Disable the time based filesystem check - for partition in info.volume.partition_map.partitions: - if isinstance(partition, UnformattedPartition): - continue - if re.match('^ext[2-4]$', partition.filesystem) is not None: - log_check_call(['tune2fs', '-i', '0', partition.device_path]) + @classmethod + def run(cls, info): + from bootstrapvz.base.fs.partitions.unformatted import UnformattedPartition + import re + # Disable the time based filesystem check + for partition in info.volume.partition_map.partitions: + if isinstance(partition, UnformattedPartition): + continue + if re.match('^ext[2-4]$', partition.filesystem) is not None: + log_check_call(['tune2fs', '-i', '0', partition.device_path]) class AddXFSProgs(Task): - description = 'Adding `xfsprogs\' to the image packages' - phase = phases.preparation + description = 'Adding `xfsprogs\' to the image packages' + phase = phases.preparation - @classmethod - def 
run(cls, info): - info.packages.add('xfsprogs') + @classmethod + def run(cls, info): + info.packages.add('xfsprogs') class CreateMountDir(Task): - description = 'Creating mountpoint for the root partition' - phase = phases.volume_mounting + description = 'Creating mountpoint for the root partition' + phase = phases.volume_mounting - @classmethod - def run(cls, info): - import os - info.root = os.path.join(info.workspace, 'root') - os.makedirs(info.root) + @classmethod + def run(cls, info): + import os + info.root = os.path.join(info.workspace, 'root') + os.makedirs(info.root) class MountRoot(Task): - description = 'Mounting the root partition' - phase = phases.volume_mounting - predecessors = [CreateMountDir] + description = 'Mounting the root partition' + phase = phases.volume_mounting + predecessors = [CreateMountDir] - @classmethod - def run(cls, info): - info.volume.partition_map.root.mount(destination=info.root) + @classmethod + def run(cls, info): + info.volume.partition_map.root.mount(destination=info.root) class CreateBootMountDir(Task): - description = 'Creating mountpoint for the boot partition' - phase = phases.volume_mounting - predecessors = [MountRoot] + description = 'Creating mountpoint for the boot partition' + phase = phases.volume_mounting + predecessors = [MountRoot] - @classmethod - def run(cls, info): - import os.path - os.makedirs(os.path.join(info.root, 'boot')) + @classmethod + def run(cls, info): + import os.path + os.makedirs(os.path.join(info.root, 'boot')) class MountBoot(Task): - description = 'Mounting the boot partition' - phase = phases.volume_mounting - predecessors = [CreateBootMountDir] + description = 'Mounting the boot partition' + phase = phases.volume_mounting + predecessors = [CreateBootMountDir] - @classmethod - def run(cls, info): - p_map = info.volume.partition_map - p_map.root.add_mount(p_map.boot, 'boot') + @classmethod + def run(cls, info): + p_map = info.volume.partition_map + p_map.root.add_mount(p_map.boot, 'boot') 
class MountSpecials(Task): - description = 'Mounting special block devices' - phase = phases.os_installation - predecessors = [bootstrap.Bootstrap] + description = 'Mounting special block devices' + phase = phases.os_installation + predecessors = [bootstrap.Bootstrap] - @classmethod - def run(cls, info): - root = info.volume.partition_map.root - root.add_mount('/dev', 'dev', ['--bind']) - root.add_mount('none', 'proc', ['--types', 'proc']) - root.add_mount('none', 'sys', ['--types', 'sysfs']) - root.add_mount('none', 'dev/pts', ['--types', 'devpts']) + @classmethod + def run(cls, info): + root = info.volume.partition_map.root + root.add_mount('/dev', 'dev', ['--bind']) + root.add_mount('none', 'proc', ['--types', 'proc']) + root.add_mount('none', 'sys', ['--types', 'sysfs']) + root.add_mount('none', 'dev/pts', ['--types', 'devpts']) class CopyMountTable(Task): - description = 'Copying mtab from host system' - phase = phases.os_installation - predecessors = [MountSpecials] + description = 'Copying mtab from host system' + phase = phases.os_installation + predecessors = [MountSpecials] - @classmethod - def run(cls, info): - import shutil - import os.path - shutil.copy('/proc/mounts', os.path.join(info.root, 'etc/mtab')) + @classmethod + def run(cls, info): + import shutil + import os.path + shutil.copy('/proc/mounts', os.path.join(info.root, 'etc/mtab')) class UnmountRoot(Task): - description = 'Unmounting the bootstrap volume' - phase = phases.volume_unmounting - successors = [volume.Detach] + description = 'Unmounting the bootstrap volume' + phase = phases.volume_unmounting + successors = [volume.Detach] - @classmethod - def run(cls, info): - info.volume.partition_map.root.unmount() + @classmethod + def run(cls, info): + info.volume.partition_map.root.unmount() class RemoveMountTable(Task): - description = 'Removing mtab' - phase = phases.volume_unmounting - successors = [UnmountRoot] + description = 'Removing mtab' + phase = phases.volume_unmounting + successors = 
[UnmountRoot] - @classmethod - def run(cls, info): - import os - os.remove(os.path.join(info.root, 'etc/mtab')) + @classmethod + def run(cls, info): + import os + os.remove(os.path.join(info.root, 'etc/mtab')) class DeleteMountDir(Task): - description = 'Deleting mountpoint for the bootstrap volume' - phase = phases.volume_unmounting - predecessors = [UnmountRoot] + description = 'Deleting mountpoint for the bootstrap volume' + phase = phases.volume_unmounting + predecessors = [UnmountRoot] - @classmethod - def run(cls, info): - import os - os.rmdir(info.root) - del info.root + @classmethod + def run(cls, info): + import os + os.rmdir(info.root) + del info.root class FStab(Task): - description = 'Adding partitions to the fstab' - phase = phases.system_modification + description = 'Adding partitions to the fstab' + phase = phases.system_modification - @classmethod - def run(cls, info): - import os.path - p_map = info.volume.partition_map - mount_points = [{'path': '/', - 'partition': p_map.root, - 'dump': '1', - 'pass_num': '1', - }] - if hasattr(p_map, 'boot'): - mount_points.append({'path': '/boot', - 'partition': p_map.boot, - 'dump': '1', - 'pass_num': '2', - }) - if hasattr(p_map, 'swap'): - mount_points.append({'path': 'none', - 'partition': p_map.swap, - 'dump': '1', - 'pass_num': '0', - }) + @classmethod + def run(cls, info): + import os.path + p_map = info.volume.partition_map + mount_points = [{'path': '/', + 'partition': p_map.root, + 'dump': '1', + 'pass_num': '1', + }] + if hasattr(p_map, 'boot'): + mount_points.append({'path': '/boot', + 'partition': p_map.boot, + 'dump': '1', + 'pass_num': '2', + }) + if hasattr(p_map, 'swap'): + mount_points.append({'path': 'none', + 'partition': p_map.swap, + 'dump': '1', + 'pass_num': '0', + }) - fstab_lines = [] - for mount_point in mount_points: - partition = mount_point['partition'] - mount_opts = ['defaults'] - fstab_lines.append('UUID={uuid} {mountpoint} {filesystem} {mount_opts} {dump} {pass_num}' - 
.format(uuid=partition.get_uuid(), - mountpoint=mount_point['path'], - filesystem=partition.filesystem, - mount_opts=','.join(mount_opts), - dump=mount_point['dump'], - pass_num=mount_point['pass_num'])) + fstab_lines = [] + for mount_point in mount_points: + partition = mount_point['partition'] + mount_opts = ['defaults'] + fstab_lines.append('UUID={uuid} {mountpoint} {filesystem} {mount_opts} {dump} {pass_num}' + .format(uuid=partition.get_uuid(), + mountpoint=mount_point['path'], + filesystem=partition.filesystem, + mount_opts=','.join(mount_opts), + dump=mount_point['dump'], + pass_num=mount_point['pass_num'])) - fstab_path = os.path.join(info.root, 'etc/fstab') - with open(fstab_path, 'w') as fstab: - fstab.write('\n'.join(fstab_lines)) - fstab.write('\n') + fstab_path = os.path.join(info.root, 'etc/fstab') + with open(fstab_path, 'w') as fstab: + fstab.write('\n'.join(fstab_lines)) + fstab.write('\n') diff --git a/bootstrapvz/common/tasks/folder.py b/bootstrapvz/common/tasks/folder.py index b576993..2a55995 100644 --- a/bootstrapvz/common/tasks/folder.py +++ b/bootstrapvz/common/tasks/folder.py @@ -5,23 +5,23 @@ import workspace class Create(Task): - description = 'Creating volume folder' - phase = phases.volume_creation - successors = [volume.Attach] + description = 'Creating volume folder' + phase = phases.volume_creation + successors = [volume.Attach] - @classmethod - def run(cls, info): - import os.path - info.root = os.path.join(info.workspace, 'root') - info.volume.create(info.root) + @classmethod + def run(cls, info): + import os.path + info.root = os.path.join(info.workspace, 'root') + info.volume.create(info.root) class Delete(Task): - description = 'Deleting volume folder' - phase = phases.cleaning - successors = [workspace.DeleteWorkspace] + description = 'Deleting volume folder' + phase = phases.cleaning + successors = [workspace.DeleteWorkspace] - @classmethod - def run(cls, info): - info.volume.delete() - del info.root + @classmethod + def 
run(cls, info): + info.volume.delete() + del info.root diff --git a/bootstrapvz/common/tasks/grub.py b/bootstrapvz/common/tasks/grub.py index 5607dc1..b666e2c 100644 --- a/bootstrapvz/common/tasks/grub.py +++ b/bootstrapvz/common/tasks/grub.py @@ -8,82 +8,82 @@ import os.path class AddGrubPackage(Task): - description = 'Adding grub package' - phase = phases.preparation + description = 'Adding grub package' + phase = phases.preparation - @classmethod - def run(cls, info): - info.packages.add('grub-pc') + @classmethod + def run(cls, info): + info.packages.add('grub-pc') class ConfigureGrub(Task): - description = 'Configuring grub' - phase = phases.system_modification - predecessors = [filesystem.FStab] + description = 'Configuring grub' + phase = phases.system_modification + predecessors = [filesystem.FStab] - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import sed_i - grub_def = os.path.join(info.root, 'etc/default/grub') - sed_i(grub_def, '^#GRUB_TERMINAL=console', 'GRUB_TERMINAL=console') - sed_i(grub_def, '^GRUB_CMDLINE_LINUX_DEFAULT="quiet"', - 'GRUB_CMDLINE_LINUX_DEFAULT="console=ttyS0"') - sed_i(grub_def, '^GRUB_TIMEOUT=[0-9]+', 'GRUB_TIMEOUT=0\n' - 'GRUB_HIDDEN_TIMEOUT=0\n' - 'GRUB_HIDDEN_TIMEOUT_QUIET=true') - sed_i(grub_def, '^#GRUB_DISABLE_RECOVERY="true"', 'GRUB_DISABLE_RECOVERY="true"') + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import sed_i + grub_def = os.path.join(info.root, 'etc/default/grub') + sed_i(grub_def, '^#GRUB_TERMINAL=console', 'GRUB_TERMINAL=console') + sed_i(grub_def, '^GRUB_CMDLINE_LINUX_DEFAULT="quiet"', + 'GRUB_CMDLINE_LINUX_DEFAULT="console=ttyS0"') + sed_i(grub_def, '^GRUB_TIMEOUT=[0-9]+', 'GRUB_TIMEOUT=0\n' + 'GRUB_HIDDEN_TIMEOUT=0\n' + 'GRUB_HIDDEN_TIMEOUT_QUIET=true') + sed_i(grub_def, '^#GRUB_DISABLE_RECOVERY="true"', 'GRUB_DISABLE_RECOVERY="true"') class InstallGrub_1_99(Task): - description = 'Installing grub 1.99' - phase = phases.system_modification - predecessors = 
[filesystem.FStab] + description = 'Installing grub 1.99' + phase = phases.system_modification + predecessors = [filesystem.FStab] - @classmethod - def run(cls, info): - p_map = info.volume.partition_map + @classmethod + def run(cls, info): + p_map = info.volume.partition_map - # GRUB screws up when installing in chrooted environments - # so we fake a real harddisk with dmsetup. - # Guide here: http://ebroder.net/2009/08/04/installing-grub-onto-a-disk-image/ - from ..fs import unmounted - with unmounted(info.volume): - info.volume.link_dm_node() - if isinstance(p_map, partitionmaps.none.NoPartitions): - p_map.root.device_path = info.volume.device_path - try: - [device_path] = log_check_call(['readlink', '-f', info.volume.device_path]) - device_map_path = os.path.join(info.root, 'boot/grub/device.map') - partition_prefix = 'msdos' - if isinstance(p_map, partitionmaps.gpt.GPTPartitionMap): - partition_prefix = 'gpt' - with open(device_map_path, 'w') as device_map: - device_map.write('(hd0) {device_path}\n'.format(device_path=device_path)) - if not isinstance(p_map, partitionmaps.none.NoPartitions): - for idx, partition in enumerate(info.volume.partition_map.partitions): - device_map.write('(hd0,{prefix}{idx}) {device_path}\n' - .format(device_path=partition.device_path, - prefix=partition_prefix, - idx=idx + 1)) + # GRUB screws up when installing in chrooted environments + # so we fake a real harddisk with dmsetup. 
+ # Guide here: http://ebroder.net/2009/08/04/installing-grub-onto-a-disk-image/ + from ..fs import unmounted + with unmounted(info.volume): + info.volume.link_dm_node() + if isinstance(p_map, partitionmaps.none.NoPartitions): + p_map.root.device_path = info.volume.device_path + try: + [device_path] = log_check_call(['readlink', '-f', info.volume.device_path]) + device_map_path = os.path.join(info.root, 'boot/grub/device.map') + partition_prefix = 'msdos' + if isinstance(p_map, partitionmaps.gpt.GPTPartitionMap): + partition_prefix = 'gpt' + with open(device_map_path, 'w') as device_map: + device_map.write('(hd0) {device_path}\n'.format(device_path=device_path)) + if not isinstance(p_map, partitionmaps.none.NoPartitions): + for idx, partition in enumerate(info.volume.partition_map.partitions): + device_map.write('(hd0,{prefix}{idx}) {device_path}\n' + .format(device_path=partition.device_path, + prefix=partition_prefix, + idx=idx + 1)) - # Install grub - log_check_call(['chroot', info.root, 'grub-install', device_path]) - log_check_call(['chroot', info.root, 'update-grub']) - finally: - with unmounted(info.volume): - info.volume.unlink_dm_node() - if isinstance(p_map, partitionmaps.none.NoPartitions): - p_map.root.device_path = info.volume.device_path + # Install grub + log_check_call(['chroot', info.root, 'grub-install', device_path]) + log_check_call(['chroot', info.root, 'update-grub']) + finally: + with unmounted(info.volume): + info.volume.unlink_dm_node() + if isinstance(p_map, partitionmaps.none.NoPartitions): + p_map.root.device_path = info.volume.device_path class InstallGrub_2(Task): - description = 'Installing grub 2' - phase = phases.system_modification - predecessors = [filesystem.FStab] - # Make sure the kernel image is updated after we have installed the bootloader - successors = [kernel.UpdateInitramfs] + description = 'Installing grub 2' + phase = phases.system_modification + predecessors = [filesystem.FStab] + # Make sure the kernel image is 
updated after we have installed the bootloader + successors = [kernel.UpdateInitramfs] - @classmethod - def run(cls, info): - log_check_call(['chroot', info.root, 'grub-install', info.volume.device_path]) - log_check_call(['chroot', info.root, 'update-grub']) + @classmethod + def run(cls, info): + log_check_call(['chroot', info.root, 'grub-install', info.volume.device_path]) + log_check_call(['chroot', info.root, 'update-grub']) diff --git a/bootstrapvz/common/tasks/host.py b/bootstrapvz/common/tasks/host.py index ad58208..7858056 100644 --- a/bootstrapvz/common/tasks/host.py +++ b/bootstrapvz/common/tasks/host.py @@ -4,28 +4,28 @@ from ..exceptions import TaskError class CheckExternalCommands(Task): - description = 'Checking availability of external commands' - phase = phases.preparation + description = 'Checking availability of external commands' + phase = phases.preparation - @classmethod - def run(cls, info): - from ..tools import log_check_call - from subprocess import CalledProcessError - import re - missing_packages = [] - for command, package in info.host_dependencies.items(): - try: - log_check_call(['type ' + command], shell=True) - except CalledProcessError: - if re.match('^https?:\/\/', package): - msg = ('The command `{command}\' is not available, ' - 'you can download the software at `{package}\'.' - .format(command=command, package=package)) - else: - msg = ('The command `{command}\' is not available, ' - 'it is located in the package `{package}\'.' 
- .format(command=command, package=package)) - missing_packages.append(msg) - if len(missing_packages) > 0: - msg = '\n'.join(missing_packages) - raise TaskError(msg) + @classmethod + def run(cls, info): + from ..tools import log_check_call + from subprocess import CalledProcessError + import re + missing_packages = [] + for command, package in info.host_dependencies.items(): + try: + log_check_call(['type ' + command], shell=True) + except CalledProcessError: + if re.match('^https?:\/\/', package): + msg = ('The command `{command}\' is not available, ' + 'you can download the software at `{package}\'.' + .format(command=command, package=package)) + else: + msg = ('The command `{command}\' is not available, ' + 'it is located in the package `{package}\'.' + .format(command=command, package=package)) + missing_packages.append(msg) + if len(missing_packages) > 0: + msg = '\n'.join(missing_packages) + raise TaskError(msg) diff --git a/bootstrapvz/common/tasks/image.py b/bootstrapvz/common/tasks/image.py index a23a3dd..30758e7 100644 --- a/bootstrapvz/common/tasks/image.py +++ b/bootstrapvz/common/tasks/image.py @@ -3,19 +3,19 @@ from bootstrapvz.common import phases class MoveImage(Task): - description = 'Moving volume image' - phase = phases.image_registration + description = 'Moving volume image' + phase = phases.image_registration - @classmethod - def run(cls, info): - image_name = info.manifest.name.format(**info.manifest_vars) - filename = image_name + '.' + info.volume.extension + @classmethod + def run(cls, info): + image_name = info.manifest.name.format(**info.manifest_vars) + filename = image_name + '.' 
+ info.volume.extension - import os.path - destination = os.path.join(info.manifest.bootstrapper['workspace'], filename) - import shutil - shutil.move(info.volume.image_path, destination) - info.volume.image_path = destination - import logging - log = logging.getLogger(__name__) - log.info('The volume image has been moved to ' + destination) + import os.path + destination = os.path.join(info.manifest.bootstrapper['workspace'], filename) + import shutil + shutil.move(info.volume.image_path, destination) + info.volume.image_path = destination + import logging + log = logging.getLogger(__name__) + log.info('The volume image has been moved to ' + destination) diff --git a/bootstrapvz/common/tasks/initd.py b/bootstrapvz/common/tasks/initd.py index 4669b03..a7362e1 100644 --- a/bootstrapvz/common/tasks/initd.py +++ b/bootstrapvz/common/tasks/initd.py @@ -6,75 +6,75 @@ import os.path class InstallInitScripts(Task): - description = 'Installing startup scripts' - phase = phases.system_modification + description = 'Installing startup scripts' + phase = phases.system_modification - @classmethod - def run(cls, info): - import stat - rwxr_xr_x = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | - stat.S_IRGRP | stat.S_IXGRP | - stat.S_IROTH | stat.S_IXOTH) - from shutil import copy - for name, src in info.initd['install'].iteritems(): - dst = os.path.join(info.root, 'etc/init.d', name) - copy(src, dst) - os.chmod(dst, rwxr_xr_x) - log_check_call(['chroot', info.root, 'insserv', '--default', name]) + @classmethod + def run(cls, info): + import stat + rwxr_xr_x = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | + stat.S_IRGRP | stat.S_IXGRP | + stat.S_IROTH | stat.S_IXOTH) + from shutil import copy + for name, src in info.initd['install'].iteritems(): + dst = os.path.join(info.root, 'etc/init.d', name) + copy(src, dst) + os.chmod(dst, rwxr_xr_x) + log_check_call(['chroot', info.root, 'insserv', '--default', name]) - for name in info.initd['disable']: - log_check_call(['chroot', 
info.root, 'insserv', '--remove', name]) + for name in info.initd['disable']: + log_check_call(['chroot', info.root, 'insserv', '--remove', name]) class AddExpandRoot(Task): - description = 'Adding init script to expand the root volume' - phase = phases.system_modification - successors = [InstallInitScripts] + description = 'Adding init script to expand the root volume' + phase = phases.system_modification + successors = [InstallInitScripts] - @classmethod - def run(cls, info): - init_scripts_dir = os.path.join(assets, 'init.d') - info.initd['install']['expand-root'] = os.path.join(init_scripts_dir, 'expand-root') + @classmethod + def run(cls, info): + init_scripts_dir = os.path.join(assets, 'init.d') + info.initd['install']['expand-root'] = os.path.join(init_scripts_dir, 'expand-root') class RemoveHWClock(Task): - description = 'Removing hardware clock init scripts' - phase = phases.system_modification - successors = [InstallInitScripts] + description = 'Removing hardware clock init scripts' + phase = phases.system_modification + successors = [InstallInitScripts] - @classmethod - def run(cls, info): - from bootstrapvz.common.releases import squeeze - info.initd['disable'].append('hwclock.sh') - if info.manifest.release == squeeze: - info.initd['disable'].append('hwclockfirst.sh') + @classmethod + def run(cls, info): + from bootstrapvz.common.releases import squeeze + info.initd['disable'].append('hwclock.sh') + if info.manifest.release == squeeze: + info.initd['disable'].append('hwclockfirst.sh') class AdjustExpandRootScript(Task): - description = 'Adjusting the expand-root script' - phase = phases.system_modification - predecessors = [InstallInitScripts] + description = 'Adjusting the expand-root script' + phase = phases.system_modification + predecessors = [InstallInitScripts] - @classmethod - def run(cls, info): - from ..tools import sed_i - script = os.path.join(info.root, 'etc/init.d/expand-root') + @classmethod + def run(cls, info): + from ..tools import 
sed_i + script = os.path.join(info.root, 'etc/init.d/expand-root') - root_idx = info.volume.partition_map.root.get_index() - root_index_line = 'root_index="{idx}"'.format(idx=root_idx) - sed_i(script, '^root_index="0"$', root_index_line) + root_idx = info.volume.partition_map.root.get_index() + root_index_line = 'root_index="{idx}"'.format(idx=root_idx) + sed_i(script, '^root_index="0"$', root_index_line) - root_device_path = 'root_device_path="{device}"'.format(device=info.volume.device_path) - sed_i(script, '^root_device_path="/dev/xvda"$', root_device_path) + root_device_path = 'root_device_path="{device}"'.format(device=info.volume.device_path) + sed_i(script, '^root_device_path="/dev/xvda"$', root_device_path) class AdjustGrowpartWorkaround(Task): - description = 'Adjusting expand-root for growpart-workaround' - phase = phases.system_modification - predecessors = [AdjustExpandRootScript] + description = 'Adjusting expand-root for growpart-workaround' + phase = phases.system_modification + predecessors = [AdjustExpandRootScript] - @classmethod - def run(cls, info): - from ..tools import sed_i - script = os.path.join(info.root, 'etc/init.d/expand-root') - sed_i(script, '^growpart="growpart"$', 'growpart-workaround') + @classmethod + def run(cls, info): + from ..tools import sed_i + script = os.path.join(info.root, 'etc/init.d/expand-root') + sed_i(script, '^growpart="growpart"$', 'growpart-workaround') diff --git a/bootstrapvz/common/tasks/kernel.py b/bootstrapvz/common/tasks/kernel.py index 5acecf6..b19c39b 100644 --- a/bootstrapvz/common/tasks/kernel.py +++ b/bootstrapvz/common/tasks/kernel.py @@ -5,48 +5,48 @@ import logging class AddDKMSPackages(Task): - description = 'Adding DKMS and kernel header packages' - phase = phases.package_installation - successors = [packages.InstallPackages] + description = 'Adding DKMS and kernel header packages' + phase = phases.package_installation + successors = [packages.InstallPackages] - @classmethod - def run(cls, info): 
- info.packages.add('dkms') - kernel_pkg_arch = {'i386': '686-pae', 'amd64': 'amd64'}[info.manifest.system['architecture']] - info.packages.add('linux-headers-' + kernel_pkg_arch) + @classmethod + def run(cls, info): + info.packages.add('dkms') + kernel_pkg_arch = {'i386': '686-pae', 'amd64': 'amd64'}[info.manifest.system['architecture']] + info.packages.add('linux-headers-' + kernel_pkg_arch) class UpdateInitramfs(Task): - description = 'Rebuilding initramfs' - phase = phases.system_modification + description = 'Rebuilding initramfs' + phase = phases.system_modification - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import log_check_call - # Update initramfs (-u) for all currently installed kernel versions (-k all) - log_check_call(['chroot', info.root, 'update-initramfs', '-u', '-k', 'all']) + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import log_check_call + # Update initramfs (-u) for all currently installed kernel versions (-k all) + log_check_call(['chroot', info.root, 'update-initramfs', '-u', '-k', 'all']) class DetermineKernelVersion(Task): - description = 'Determining kernel version' - phase = phases.package_installation - predecessors = [packages.InstallPackages] + description = 'Determining kernel version' + phase = phases.package_installation + predecessors = [packages.InstallPackages] - @classmethod - def run(cls, info): - # Snatched from `extlinux-update' in wheezy - # list the files in boot/ that match vmlinuz-* - # sort what the * matches, the first entry is the kernel version - import os.path - import re - regexp = re.compile('^vmlinuz-(?P.+)$') + @classmethod + def run(cls, info): + # Snatched from `extlinux-update' in wheezy + # list the files in boot/ that match vmlinuz-* + # sort what the * matches, the first entry is the kernel version + import os.path + import re + regexp = re.compile('^vmlinuz-(?P.+)$') - def get_kernel_version(vmlinuz_path): - vmlinux_basename = os.path.basename(vmlinuz_path) - 
return regexp.match(vmlinux_basename).group('version') - from glob import glob - boot = os.path.join(info.root, 'boot') - vmlinuz_paths = glob('{boot}/vmlinuz-*'.format(boot=boot)) - kernels = map(get_kernel_version, vmlinuz_paths) - info.kernel_version = sorted(kernels, reverse=True)[0] - logging.getLogger(__name__).debug('Kernel version is {version}'.format(version=info.kernel_version)) + def get_kernel_version(vmlinuz_path): + vmlinux_basename = os.path.basename(vmlinuz_path) + return regexp.match(vmlinux_basename).group('version') + from glob import glob + boot = os.path.join(info.root, 'boot') + vmlinuz_paths = glob('{boot}/vmlinuz-*'.format(boot=boot)) + kernels = map(get_kernel_version, vmlinuz_paths) + info.kernel_version = sorted(kernels, reverse=True)[0] + logging.getLogger(__name__).debug('Kernel version is {version}'.format(version=info.kernel_version)) diff --git a/bootstrapvz/common/tasks/locale.py b/bootstrapvz/common/tasks/locale.py index e414244..3609130 100644 --- a/bootstrapvz/common/tasks/locale.py +++ b/bootstrapvz/common/tasks/locale.py @@ -4,71 +4,71 @@ import os.path class LocaleBootstrapPackage(Task): - description = 'Adding locale package to bootstrap installation' - phase = phases.preparation + description = 'Adding locale package to bootstrap installation' + phase = phases.preparation - @classmethod - def run(cls, info): - # We could bootstrap without locales, but things just suck without them - # eg. error messages when running apt - info.include_packages.add('locales') + @classmethod + def run(cls, info): + # We could bootstrap without locales, but things just suck without them + # eg. 
error messages when running apt + info.include_packages.add('locales') class GenerateLocale(Task): - description = 'Generating system locale' - phase = phases.package_installation + description = 'Generating system locale' + phase = phases.package_installation - @classmethod - def run(cls, info): - from ..tools import sed_i - from ..tools import log_check_call + @classmethod + def run(cls, info): + from ..tools import sed_i + from ..tools import log_check_call - lang = '{locale}.{charmap}'.format(locale=info.manifest.system['locale'], - charmap=info.manifest.system['charmap']) - locale_str = '{locale}.{charmap} {charmap}'.format(locale=info.manifest.system['locale'], - charmap=info.manifest.system['charmap']) + lang = '{locale}.{charmap}'.format(locale=info.manifest.system['locale'], + charmap=info.manifest.system['charmap']) + locale_str = '{locale}.{charmap} {charmap}'.format(locale=info.manifest.system['locale'], + charmap=info.manifest.system['charmap']) - search = '# ' + locale_str - locale_gen = os.path.join(info.root, 'etc/locale.gen') - sed_i(locale_gen, search, locale_str) + search = '# ' + locale_str + locale_gen = os.path.join(info.root, 'etc/locale.gen') + sed_i(locale_gen, search, locale_str) - log_check_call(['chroot', info.root, 'locale-gen']) - log_check_call(['chroot', info.root, - 'update-locale', 'LANG=' + lang]) + log_check_call(['chroot', info.root, 'locale-gen']) + log_check_call(['chroot', info.root, + 'update-locale', 'LANG=' + lang]) class SetTimezone(Task): - description = 'Setting the selected timezone' - phase = phases.system_modification + description = 'Setting the selected timezone' + phase = phases.system_modification - @classmethod - def run(cls, info): - tz_path = os.path.join(info.root, 'etc/timezone') - timezone = info.manifest.system['timezone'] - with open(tz_path, 'w') as tz_file: - tz_file.write(timezone) + @classmethod + def run(cls, info): + tz_path = os.path.join(info.root, 'etc/timezone') + timezone = 
info.manifest.system['timezone'] + with open(tz_path, 'w') as tz_file: + tz_file.write(timezone) class SetLocalTimeLink(Task): - description = 'Setting the selected local timezone (link)' - phase = phases.system_modification + description = 'Setting the selected local timezone (link)' + phase = phases.system_modification - @classmethod - def run(cls, info): - timezone = info.manifest.system['timezone'] - localtime_path = os.path.join(info.root, 'etc/localtime') - os.unlink(localtime_path) - os.symlink(os.path.join('/usr/share/zoneinfo', timezone), localtime_path) + @classmethod + def run(cls, info): + timezone = info.manifest.system['timezone'] + localtime_path = os.path.join(info.root, 'etc/localtime') + os.unlink(localtime_path) + os.symlink(os.path.join('/usr/share/zoneinfo', timezone), localtime_path) class SetLocalTimeCopy(Task): - description = 'Setting the selected local timezone (copy)' - phase = phases.system_modification + description = 'Setting the selected local timezone (copy)' + phase = phases.system_modification - @classmethod - def run(cls, info): - from shutil import copy - timezone = info.manifest.system['timezone'] - zoneinfo_path = os.path.join(info.root, '/usr/share/zoneinfo', timezone) - localtime_path = os.path.join(info.root, 'etc/localtime') - copy(zoneinfo_path, localtime_path) + @classmethod + def run(cls, info): + from shutil import copy + timezone = info.manifest.system['timezone'] + zoneinfo_path = os.path.join(info.root, '/usr/share/zoneinfo', timezone) + localtime_path = os.path.join(info.root, 'etc/localtime') + copy(zoneinfo_path, localtime_path) diff --git a/bootstrapvz/common/tasks/loopback.py b/bootstrapvz/common/tasks/loopback.py index 721dad6..7215476 100644 --- a/bootstrapvz/common/tasks/loopback.py +++ b/bootstrapvz/common/tasks/loopback.py @@ -5,28 +5,28 @@ import volume class AddRequiredCommands(Task): - description = 'Adding commands required for creating loopback volumes' - phase = phases.preparation - successors = 
[host.CheckExternalCommands] + description = 'Adding commands required for creating loopback volumes' + phase = phases.preparation + successors = [host.CheckExternalCommands] - @classmethod - def run(cls, info): - from ..fs.loopbackvolume import LoopbackVolume - from ..fs.qemuvolume import QEMUVolume - if type(info.volume) is LoopbackVolume: - info.host_dependencies['losetup'] = 'mount' - info.host_dependencies['truncate'] = 'coreutils' - if isinstance(info.volume, QEMUVolume): - info.host_dependencies['qemu-img'] = 'qemu-utils' + @classmethod + def run(cls, info): + from ..fs.loopbackvolume import LoopbackVolume + from ..fs.qemuvolume import QEMUVolume + if type(info.volume) is LoopbackVolume: + info.host_dependencies['losetup'] = 'mount' + info.host_dependencies['truncate'] = 'coreutils' + if isinstance(info.volume, QEMUVolume): + info.host_dependencies['qemu-img'] = 'qemu-utils' class Create(Task): - description = 'Creating a loopback volume' - phase = phases.volume_creation - successors = [volume.Attach] + description = 'Creating a loopback volume' + phase = phases.volume_creation + successors = [volume.Attach] - @classmethod - def run(cls, info): - import os.path - image_path = os.path.join(info.workspace, 'volume.' + info.volume.extension) - info.volume.create(image_path) + @classmethod + def run(cls, info): + import os.path + image_path = os.path.join(info.workspace, 'volume.' 
+ info.volume.extension) + info.volume.create(image_path) diff --git a/bootstrapvz/common/tasks/network.py b/bootstrapvz/common/tasks/network.py index c073867..dcf2c8b 100644 --- a/bootstrapvz/common/tasks/network.py +++ b/bootstrapvz/common/tasks/network.py @@ -4,51 +4,51 @@ import os class RemoveDNSInfo(Task): - description = 'Removing resolv.conf' - phase = phases.system_cleaning + description = 'Removing resolv.conf' + phase = phases.system_cleaning - @classmethod - def run(cls, info): - if os.path.isfile(os.path.join(info.root, 'etc/resolv.conf')): - os.remove(os.path.join(info.root, 'etc/resolv.conf')) + @classmethod + def run(cls, info): + if os.path.isfile(os.path.join(info.root, 'etc/resolv.conf')): + os.remove(os.path.join(info.root, 'etc/resolv.conf')) class RemoveHostname(Task): - description = 'Removing the hostname file' - phase = phases.system_cleaning + description = 'Removing the hostname file' + phase = phases.system_cleaning - @classmethod - def run(cls, info): - if os.path.isfile(os.path.join(info.root, 'etc/hostname')): - os.remove(os.path.join(info.root, 'etc/hostname')) + @classmethod + def run(cls, info): + if os.path.isfile(os.path.join(info.root, 'etc/hostname')): + os.remove(os.path.join(info.root, 'etc/hostname')) class SetHostname(Task): - description = 'Writing hostname into the hostname file' - phase = phases.system_modification + description = 'Writing hostname into the hostname file' + phase = phases.system_modification - @classmethod - def run(cls, info): - hostname = info.manifest.system['hostname'].format(**info.manifest_vars) - hostname_file_path = os.path.join(info.root, 'etc/hostname') - with open(hostname_file_path, 'w') as hostname_file: - hostname_file.write(hostname) + @classmethod + def run(cls, info): + hostname = info.manifest.system['hostname'].format(**info.manifest_vars) + hostname_file_path = os.path.join(info.root, 'etc/hostname') + with open(hostname_file_path, 'w') as hostname_file: + 
hostname_file.write(hostname) - hosts_path = os.path.join(info.root, 'etc/hosts') - from bootstrapvz.common.tools import sed_i - sed_i(hosts_path, '^127.0.0.1\tlocalhost$', '127.0.0.1\tlocalhost\n127.0.1.1\t' + hostname) + hosts_path = os.path.join(info.root, 'etc/hosts') + from bootstrapvz.common.tools import sed_i + sed_i(hosts_path, '^127.0.0.1\tlocalhost$', '127.0.0.1\tlocalhost\n127.0.1.1\t' + hostname) class ConfigureNetworkIF(Task): - description = 'Configuring network interfaces' - phase = phases.system_modification + description = 'Configuring network interfaces' + phase = phases.system_modification - @classmethod - def run(cls, info): - network_config_path = os.path.join(os.path.dirname(__file__), 'network-configuration.yml') - from ..tools import config_get - if_config = config_get(network_config_path, [info.manifest.release.codename]) + @classmethod + def run(cls, info): + network_config_path = os.path.join(os.path.dirname(__file__), 'network-configuration.yml') + from ..tools import config_get + if_config = config_get(network_config_path, [info.manifest.release.codename]) - interfaces_path = os.path.join(info.root, 'etc/network/interfaces') - with open(interfaces_path, 'a') as interfaces: - interfaces.write(if_config + '\n') + interfaces_path = os.path.join(info.root, 'etc/network/interfaces') + with open(interfaces_path, 'a') as interfaces: + interfaces.write(if_config + '\n') diff --git a/bootstrapvz/common/tasks/packages.py b/bootstrapvz/common/tasks/packages.py index 403a31c..6aa400e 100644 --- a/bootstrapvz/common/tasks/packages.py +++ b/bootstrapvz/common/tasks/packages.py @@ -5,107 +5,107 @@ from ..tools import log_check_call class AddManifestPackages(Task): - description = 'Adding packages from the manifest' - phase = phases.preparation - predecessors = [apt.AddManifestSources, apt.AddDefaultSources, apt.AddBackports] + description = 'Adding packages from the manifest' + phase = phases.preparation + predecessors = [apt.AddManifestSources, 
apt.AddDefaultSources, apt.AddBackports] - @classmethod - def run(cls, info): - import re - remote = re.compile('^(?P[^/]+)(/(?P[^/]+))?$') - for package in info.manifest.packages['install']: - match = remote.match(package) - if match is not None: - info.packages.add(match.group('name'), match.group('target')) - else: - info.packages.add_local(package) + @classmethod + def run(cls, info): + import re + remote = re.compile('^(?P[^/]+)(/(?P[^/]+))?$') + for package in info.manifest.packages['install']: + match = remote.match(package) + if match is not None: + info.packages.add(match.group('name'), match.group('target')) + else: + info.packages.add_local(package) class InstallPackages(Task): - description = 'Installing packages' - phase = phases.package_installation - predecessors = [apt.AptUpgrade] + description = 'Installing packages' + phase = phases.package_installation + predecessors = [apt.AptUpgrade] - @classmethod - def run(cls, info): - batch = [] - actions = {info.packages.Remote: cls.install_remote, - info.packages.Local: cls.install_local} - for i, package in enumerate(info.packages.install): - batch.append(package) - next_package = info.packages.install[i + 1] if i + 1 < len(info.packages.install) else None - if next_package is None or package.__class__ is not next_package.__class__: - actions[package.__class__](info, batch) - batch = [] + @classmethod + def run(cls, info): + batch = [] + actions = {info.packages.Remote: cls.install_remote, + info.packages.Local: cls.install_local} + for i, package in enumerate(info.packages.install): + batch.append(package) + next_package = info.packages.install[i + 1] if i + 1 < len(info.packages.install) else None + if next_package is None or package.__class__ is not next_package.__class__: + actions[package.__class__](info, batch) + batch = [] - @classmethod - def install_remote(cls, info, remote_packages): - import os - from ..tools import log_check_call - from subprocess import CalledProcessError - try: - env = 
os.environ.copy() - env['DEBIAN_FRONTEND'] = 'noninteractive' - log_check_call(['chroot', info.root, - 'apt-get', 'install', - '--no-install-recommends', - '--assume-yes'] + - map(str, remote_packages), - env=env) - except CalledProcessError as e: - import logging - disk_stat = os.statvfs(info.root) - root_free_mb = disk_stat.f_bsize * disk_stat.f_bavail / 1024 / 1024 - disk_stat = os.statvfs(os.path.join(info.root, 'boot')) - boot_free_mb = disk_stat.f_bsize * disk_stat.f_bavail / 1024 / 1024 - free_mb = min(root_free_mb, boot_free_mb) - if free_mb < 50: - msg = ('apt exited with a non-zero status, ' - 'this may be because\nthe image volume is ' - 'running out of disk space ({free}MB left)').format(free=free_mb) - logging.getLogger(__name__).warn(msg) - else: - if e.returncode == 100: - msg = ('apt exited with status code 100. ' - 'This can sometimes occur when package retrieval times out or a package extraction failed. ' - 'apt might succeed if you try bootstrapping again.') - logging.getLogger(__name__).warn(msg) - raise + @classmethod + def install_remote(cls, info, remote_packages): + import os + from ..tools import log_check_call + from subprocess import CalledProcessError + try: + env = os.environ.copy() + env['DEBIAN_FRONTEND'] = 'noninteractive' + log_check_call(['chroot', info.root, + 'apt-get', 'install', + '--no-install-recommends', + '--assume-yes'] + + map(str, remote_packages), + env=env) + except CalledProcessError as e: + import logging + disk_stat = os.statvfs(info.root) + root_free_mb = disk_stat.f_bsize * disk_stat.f_bavail / 1024 / 1024 + disk_stat = os.statvfs(os.path.join(info.root, 'boot')) + boot_free_mb = disk_stat.f_bsize * disk_stat.f_bavail / 1024 / 1024 + free_mb = min(root_free_mb, boot_free_mb) + if free_mb < 50: + msg = ('apt exited with a non-zero status, ' + 'this may be because\nthe image volume is ' + 'running out of disk space ({free}MB left)').format(free=free_mb) + logging.getLogger(__name__).warn(msg) + else: + if 
e.returncode == 100: + msg = ('apt exited with status code 100. ' + 'This can sometimes occur when package retrieval times out or a package extraction failed. ' + 'apt might succeed if you try bootstrapping again.') + logging.getLogger(__name__).warn(msg) + raise - @classmethod - def install_local(cls, info, local_packages): - from shutil import copy - import os + @classmethod + def install_local(cls, info, local_packages): + from shutil import copy + import os - absolute_package_paths = [] - chrooted_package_paths = [] - for package_src in local_packages: - pkg_name = os.path.basename(package_src.path) - package_rel_dst = os.path.join('tmp', pkg_name) - package_dst = os.path.join(info.root, package_rel_dst) - copy(package_src.path, package_dst) - absolute_package_paths.append(package_dst) - package_path = os.path.join('/', package_rel_dst) - chrooted_package_paths.append(package_path) + absolute_package_paths = [] + chrooted_package_paths = [] + for package_src in local_packages: + pkg_name = os.path.basename(package_src.path) + package_rel_dst = os.path.join('tmp', pkg_name) + package_dst = os.path.join(info.root, package_rel_dst) + copy(package_src.path, package_dst) + absolute_package_paths.append(package_dst) + package_path = os.path.join('/', package_rel_dst) + chrooted_package_paths.append(package_path) - env = os.environ.copy() - env['DEBIAN_FRONTEND'] = 'noninteractive' - log_check_call(['chroot', info.root, - 'dpkg', '--install'] + chrooted_package_paths, - env=env) + env = os.environ.copy() + env['DEBIAN_FRONTEND'] = 'noninteractive' + log_check_call(['chroot', info.root, + 'dpkg', '--install'] + chrooted_package_paths, + env=env) - for path in absolute_package_paths: - os.remove(path) + for path in absolute_package_paths: + os.remove(path) class AddTaskselStandardPackages(Task): - description = 'Adding standard packages from tasksel' - phase = phases.package_installation - predecessors = [apt.AptUpdate] - successors = [InstallPackages] + description = 
'Adding standard packages from tasksel' + phase = phases.package_installation + predecessors = [apt.AptUpdate] + successors = [InstallPackages] - @classmethod - def run(cls, info): - tasksel_packages = log_check_call(['chroot', info.root, 'tasksel', '--task-packages', 'standard']) - for pkg in tasksel_packages: - info.packages.add(pkg) + @classmethod + def run(cls, info): + tasksel_packages = log_check_call(['chroot', info.root, 'tasksel', '--task-packages', 'standard']) + for pkg in tasksel_packages: + info.packages.add(pkg) diff --git a/bootstrapvz/common/tasks/partitioning.py b/bootstrapvz/common/tasks/partitioning.py index db75263..c8f8fca 100644 --- a/bootstrapvz/common/tasks/partitioning.py +++ b/bootstrapvz/common/tasks/partitioning.py @@ -6,44 +6,44 @@ import volume class AddRequiredCommands(Task): - description = 'Adding commands required for partitioning the volume' - phase = phases.preparation - successors = [host.CheckExternalCommands] + description = 'Adding commands required for partitioning the volume' + phase = phases.preparation + successors = [host.CheckExternalCommands] - @classmethod - def run(cls, info): - from bootstrapvz.base.fs.partitionmaps.none import NoPartitions - if not isinstance(info.volume.partition_map, NoPartitions): - info.host_dependencies['parted'] = 'parted' - info.host_dependencies['kpartx'] = 'kpartx' + @classmethod + def run(cls, info): + from bootstrapvz.base.fs.partitionmaps.none import NoPartitions + if not isinstance(info.volume.partition_map, NoPartitions): + info.host_dependencies['parted'] = 'parted' + info.host_dependencies['kpartx'] = 'kpartx' class PartitionVolume(Task): - description = 'Partitioning the volume' - phase = phases.volume_preparation + description = 'Partitioning the volume' + phase = phases.volume_preparation - @classmethod - def run(cls, info): - info.volume.partition_map.create(info.volume) + @classmethod + def run(cls, info): + info.volume.partition_map.create(info.volume) class 
MapPartitions(Task): - description = 'Mapping volume partitions' - phase = phases.volume_preparation - predecessors = [PartitionVolume] - successors = [filesystem.Format] + description = 'Mapping volume partitions' + phase = phases.volume_preparation + predecessors = [PartitionVolume] + successors = [filesystem.Format] - @classmethod - def run(cls, info): - info.volume.partition_map.map(info.volume) + @classmethod + def run(cls, info): + info.volume.partition_map.map(info.volume) class UnmapPartitions(Task): - description = 'Removing volume partitions mapping' - phase = phases.volume_unmounting - predecessors = [filesystem.UnmountRoot] - successors = [volume.Detach] + description = 'Removing volume partitions mapping' + phase = phases.volume_unmounting + predecessors = [filesystem.UnmountRoot] + successors = [volume.Detach] - @classmethod - def run(cls, info): - info.volume.partition_map.unmap(info.volume) + @classmethod + def run(cls, info): + info.volume.partition_map.unmap(info.volume) diff --git a/bootstrapvz/common/tasks/security.py b/bootstrapvz/common/tasks/security.py index 634b9b6..2585d0b 100644 --- a/bootstrapvz/common/tasks/security.py +++ b/bootstrapvz/common/tasks/security.py @@ -3,10 +3,10 @@ from .. 
import phases class EnableShadowConfig(Task): - description = 'Enabling shadowconfig' - phase = phases.system_modification + description = 'Enabling shadowconfig' + phase = phases.system_modification - @classmethod - def run(cls, info): - from ..tools import log_check_call - log_check_call(['chroot', info.root, 'shadowconfig', 'on']) + @classmethod + def run(cls, info): + from ..tools import log_check_call + log_check_call(['chroot', info.root, 'shadowconfig', 'on']) diff --git a/bootstrapvz/common/tasks/ssh.py b/bootstrapvz/common/tasks/ssh.py index f094b54..bfe7707 100644 --- a/bootstrapvz/common/tasks/ssh.py +++ b/bootstrapvz/common/tasks/ssh.py @@ -7,106 +7,106 @@ import initd class AddOpenSSHPackage(Task): - description = 'Adding openssh package' - phase = phases.preparation + description = 'Adding openssh package' + phase = phases.preparation - @classmethod - def run(cls, info): - info.packages.add('openssh-server') + @classmethod + def run(cls, info): + info.packages.add('openssh-server') class AddSSHKeyGeneration(Task): - description = 'Adding SSH private key generation init scripts' - phase = phases.system_modification - successors = [initd.InstallInitScripts] + description = 'Adding SSH private key generation init scripts' + phase = phases.system_modification + successors = [initd.InstallInitScripts] - @classmethod - def run(cls, info): - init_scripts_dir = os.path.join(assets, 'init.d') - install = info.initd['install'] - from subprocess import CalledProcessError - try: - log_check_call(['chroot', info.root, - 'dpkg-query', '-W', 'openssh-server']) - from bootstrapvz.common.releases import squeeze - if info.manifest.release == squeeze: - install['generate-ssh-hostkeys'] = os.path.join(init_scripts_dir, 'squeeze/generate-ssh-hostkeys') - else: - install['generate-ssh-hostkeys'] = os.path.join(init_scripts_dir, 'generate-ssh-hostkeys') - except CalledProcessError: - import logging - logging.getLogger(__name__).warn('The OpenSSH server has not been 
installed, ' - 'not installing SSH host key generation script.') + @classmethod + def run(cls, info): + init_scripts_dir = os.path.join(assets, 'init.d') + install = info.initd['install'] + from subprocess import CalledProcessError + try: + log_check_call(['chroot', info.root, + 'dpkg-query', '-W', 'openssh-server']) + from bootstrapvz.common.releases import squeeze + if info.manifest.release == squeeze: + install['generate-ssh-hostkeys'] = os.path.join(init_scripts_dir, 'squeeze/generate-ssh-hostkeys') + else: + install['generate-ssh-hostkeys'] = os.path.join(init_scripts_dir, 'generate-ssh-hostkeys') + except CalledProcessError: + import logging + logging.getLogger(__name__).warn('The OpenSSH server has not been installed, ' + 'not installing SSH host key generation script.') class DisableSSHPasswordAuthentication(Task): - description = 'Disabling SSH password authentication' - phase = phases.system_modification + description = 'Disabling SSH password authentication' + phase = phases.system_modification - @classmethod - def run(cls, info): - from ..tools import sed_i - sshd_config_path = os.path.join(info.root, 'etc/ssh/sshd_config') - sed_i(sshd_config_path, '^#PasswordAuthentication yes', 'PasswordAuthentication no') + @classmethod + def run(cls, info): + from ..tools import sed_i + sshd_config_path = os.path.join(info.root, 'etc/ssh/sshd_config') + sed_i(sshd_config_path, '^#PasswordAuthentication yes', 'PasswordAuthentication no') class EnableRootLogin(Task): - description = 'Enabling SSH login for root' - phase = phases.system_modification + description = 'Enabling SSH login for root' + phase = phases.system_modification - @classmethod - def run(cls, info): - sshdconfig_path = os.path.join(info.root, 'etc/ssh/sshd_config') - if os.path.exists(sshdconfig_path): - from bootstrapvz.common.tools import sed_i - sed_i(sshdconfig_path, '^PermitRootLogin .*', 'PermitRootLogin yes') - else: - import logging - logging.getLogger(__name__).warn('The OpenSSH server has 
not been installed, ' - 'not enabling SSH root login.') + @classmethod + def run(cls, info): + sshdconfig_path = os.path.join(info.root, 'etc/ssh/sshd_config') + if os.path.exists(sshdconfig_path): + from bootstrapvz.common.tools import sed_i + sed_i(sshdconfig_path, '^PermitRootLogin .*', 'PermitRootLogin yes') + else: + import logging + logging.getLogger(__name__).warn('The OpenSSH server has not been installed, ' + 'not enabling SSH root login.') class DisableRootLogin(Task): - description = 'Disabling SSH login for root' - phase = phases.system_modification + description = 'Disabling SSH login for root' + phase = phases.system_modification - @classmethod - def run(cls, info): - sshdconfig_path = os.path.join(info.root, 'etc/ssh/sshd_config') - if os.path.exists(sshdconfig_path): - from bootstrapvz.common.tools import sed_i - sed_i(sshdconfig_path, '^PermitRootLogin .*', 'PermitRootLogin no') - else: - import logging - logging.getLogger(__name__).warn('The OpenSSH server has not been installed, ' - 'not disabling SSH root login.') + @classmethod + def run(cls, info): + sshdconfig_path = os.path.join(info.root, 'etc/ssh/sshd_config') + if os.path.exists(sshdconfig_path): + from bootstrapvz.common.tools import sed_i + sed_i(sshdconfig_path, '^PermitRootLogin .*', 'PermitRootLogin no') + else: + import logging + logging.getLogger(__name__).warn('The OpenSSH server has not been installed, ' + 'not disabling SSH root login.') class DisableSSHDNSLookup(Task): - description = 'Disabling sshd remote host name lookup' - phase = phases.system_modification + description = 'Disabling sshd remote host name lookup' + phase = phases.system_modification - @classmethod - def run(cls, info): - sshd_config_path = os.path.join(info.root, 'etc/ssh/sshd_config') - with open(sshd_config_path, 'a') as sshd_config: - sshd_config.write('UseDNS no') + @classmethod + def run(cls, info): + sshd_config_path = os.path.join(info.root, 'etc/ssh/sshd_config') + with open(sshd_config_path, 'a') 
as sshd_config: + sshd_config.write('UseDNS no') class ShredHostkeys(Task): - description = 'Securely deleting ssh hostkeys' - phase = phases.system_cleaning + description = 'Securely deleting ssh hostkeys' + phase = phases.system_cleaning - @classmethod - def run(cls, info): - ssh_hostkeys = ['ssh_host_dsa_key', - 'ssh_host_rsa_key'] - from bootstrapvz.common.releases import wheezy - if info.manifest.release >= wheezy: - ssh_hostkeys.append('ssh_host_ecdsa_key') + @classmethod + def run(cls, info): + ssh_hostkeys = ['ssh_host_dsa_key', + 'ssh_host_rsa_key'] + from bootstrapvz.common.releases import wheezy + if info.manifest.release >= wheezy: + ssh_hostkeys.append('ssh_host_ecdsa_key') - private = [os.path.join(info.root, 'etc/ssh', name) for name in ssh_hostkeys] - public = [path + '.pub' for path in private] + private = [os.path.join(info.root, 'etc/ssh', name) for name in ssh_hostkeys] + public = [path + '.pub' for path in private] - from ..tools import log_check_call - log_check_call(['shred', '--remove'] + private + public) + from ..tools import log_check_call + log_check_call(['shred', '--remove'] + private + public) diff --git a/bootstrapvz/common/tasks/volume.py b/bootstrapvz/common/tasks/volume.py index 337d6b7..b810397 100644 --- a/bootstrapvz/common/tasks/volume.py +++ b/bootstrapvz/common/tasks/volume.py @@ -4,28 +4,28 @@ import workspace class Attach(Task): - description = 'Attaching the volume' - phase = phases.volume_creation + description = 'Attaching the volume' + phase = phases.volume_creation - @classmethod - def run(cls, info): - info.volume.attach() + @classmethod + def run(cls, info): + info.volume.attach() class Detach(Task): - description = 'Detaching the volume' - phase = phases.volume_unmounting + description = 'Detaching the volume' + phase = phases.volume_unmounting - @classmethod - def run(cls, info): - info.volume.detach() + @classmethod + def run(cls, info): + info.volume.detach() class Delete(Task): - description = 'Deleting the 
volume' - phase = phases.cleaning - successors = [workspace.DeleteWorkspace] + description = 'Deleting the volume' + phase = phases.cleaning + successors = [workspace.DeleteWorkspace] - @classmethod - def run(cls, info): - info.volume.delete() + @classmethod + def run(cls, info): + info.volume.delete() diff --git a/bootstrapvz/common/tasks/workspace.py b/bootstrapvz/common/tasks/workspace.py index bd1ddac..e005744 100644 --- a/bootstrapvz/common/tasks/workspace.py +++ b/bootstrapvz/common/tasks/workspace.py @@ -3,20 +3,20 @@ from .. import phases class CreateWorkspace(Task): - description = 'Creating workspace' - phase = phases.preparation + description = 'Creating workspace' + phase = phases.preparation - @classmethod - def run(cls, info): - import os - os.makedirs(info.workspace) + @classmethod + def run(cls, info): + import os + os.makedirs(info.workspace) class DeleteWorkspace(Task): - description = 'Deleting workspace' - phase = phases.cleaning + description = 'Deleting workspace' + phase = phases.cleaning - @classmethod - def run(cls, info): - import os - os.rmdir(info.workspace) + @classmethod + def run(cls, info): + import os + os.rmdir(info.workspace) diff --git a/bootstrapvz/common/tools.py b/bootstrapvz/common/tools.py index 5df7ff3..ada2909 100644 --- a/bootstrapvz/common/tools.py +++ b/bootstrapvz/common/tools.py @@ -2,134 +2,134 @@ import os def log_check_call(command, stdin=None, env=None, shell=False, cwd=None): - status, stdout, stderr = log_call(command, stdin, env, shell, cwd) - from subprocess import CalledProcessError - if status != 0: - e = CalledProcessError(status, ' '.join(command), '\n'.join(stderr)) - # Fix Pyro4's fixIronPythonExceptionForPickle() by setting the args property, - # even though we use our own serialization (at least I think that's the problem). - # See bootstrapvz.remote.serialize_called_process_error for more info. 
- setattr(e, 'args', (status, ' '.join(command), '\n'.join(stderr))) - raise e - return stdout + status, stdout, stderr = log_call(command, stdin, env, shell, cwd) + from subprocess import CalledProcessError + if status != 0: + e = CalledProcessError(status, ' '.join(command), '\n'.join(stderr)) + # Fix Pyro4's fixIronPythonExceptionForPickle() by setting the args property, + # even though we use our own serialization (at least I think that's the problem). + # See bootstrapvz.remote.serialize_called_process_error for more info. + setattr(e, 'args', (status, ' '.join(command), '\n'.join(stderr))) + raise e + return stdout def log_call(command, stdin=None, env=None, shell=False, cwd=None): - import subprocess - import logging - from multiprocessing.dummy import Pool as ThreadPool - from os.path import realpath + import subprocess + import logging + from multiprocessing.dummy import Pool as ThreadPool + from os.path import realpath - command_log = realpath(command[0]).replace('/', '.') - log = logging.getLogger(__name__ + command_log) - if type(command) is list: - log.debug('Executing: {command}'.format(command=' '.join(command))) - else: - log.debug('Executing: {command}'.format(command=command)) + command_log = realpath(command[0]).replace('/', '.') + log = logging.getLogger(__name__ + command_log) + if type(command) is list: + log.debug('Executing: {command}'.format(command=' '.join(command))) + else: + log.debug('Executing: {command}'.format(command=command)) - process = subprocess.Popen(args=command, env=env, shell=shell, cwd=cwd, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + process = subprocess.Popen(args=command, env=env, shell=shell, cwd=cwd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) - if stdin is not None: - log.debug(' stdin: ' + stdin) - process.stdin.write(stdin + "\n") - process.stdin.flush() - process.stdin.close() + if stdin is not None: + log.debug(' stdin: ' + stdin) + 
process.stdin.write(stdin + "\n") + process.stdin.flush() + process.stdin.close() - stdout = [] - stderr = [] + stdout = [] + stderr = [] - def handle_stdout(line): - log.debug(line) - stdout.append(line) + def handle_stdout(line): + log.debug(line) + stdout.append(line) - def handle_stderr(line): - log.error(line) - stderr.append(line) + def handle_stderr(line): + log.error(line) + stderr.append(line) - handlers = {process.stdout: handle_stdout, - process.stderr: handle_stderr} + handlers = {process.stdout: handle_stdout, + process.stderr: handle_stderr} - def stream_readline(stream): - for line in iter(stream.readline, ''): - handlers[stream](line.strip()) + def stream_readline(stream): + for line in iter(stream.readline, ''): + handlers[stream](line.strip()) - pool = ThreadPool(2) - pool.map(stream_readline, [process.stdout, process.stderr]) - pool.close() - pool.join() - process.wait() - return process.returncode, stdout, stderr + pool = ThreadPool(2) + pool.map(stream_readline, [process.stdout, process.stderr]) + pool.close() + pool.join() + process.wait() + return process.returncode, stdout, stderr def sed_i(file_path, pattern, subst, expected_replacements=1): - replacement_count = inline_replace(file_path, pattern, subst) - if replacement_count != expected_replacements: - from exceptions import UnexpectedNumMatchesError - msg = ('There were {real} instead of {expected} matches for ' - 'the expression `{exp}\' in the file `{path}\'' - .format(real=replacement_count, expected=expected_replacements, - exp=pattern, path=file_path)) - raise UnexpectedNumMatchesError(msg) + replacement_count = inline_replace(file_path, pattern, subst) + if replacement_count != expected_replacements: + from exceptions import UnexpectedNumMatchesError + msg = ('There were {real} instead of {expected} matches for ' + 'the expression `{exp}\' in the file `{path}\'' + .format(real=replacement_count, expected=expected_replacements, + exp=pattern, path=file_path)) + raise 
UnexpectedNumMatchesError(msg) def inline_replace(file_path, pattern, subst): - import fileinput - import re - replacement_count = 0 - for line in fileinput.input(files=file_path, inplace=True): - (replacement, count) = re.subn(pattern, subst, line) - replacement_count += count - print replacement, - return replacement_count + import fileinput + import re + replacement_count = 0 + for line in fileinput.input(files=file_path, inplace=True): + (replacement, count) = re.subn(pattern, subst, line) + replacement_count += count + print replacement, + return replacement_count def load_json(path): - import json - from minify_json import json_minify - with open(path) as stream: - return json.loads(json_minify(stream.read(), False)) + import json + from minify_json import json_minify + with open(path) as stream: + return json.loads(json_minify(stream.read(), False)) def load_yaml(path): - import yaml - with open(path, 'r') as stream: - return yaml.safe_load(stream) + import yaml + with open(path, 'r') as stream: + return yaml.safe_load(stream) def load_data(path): - filename, extension = os.path.splitext(path) - if not os.path.isfile(path): - raise Exception('The path {path} does not point to a file.'.format(path=path)) - if extension == '.json': - return load_json(path) - elif extension == '.yml' or extension == '.yaml': - return load_yaml(path) - else: - raise Exception('Unrecognized extension: {ext}'.format(ext=extension)) + filename, extension = os.path.splitext(path) + if not os.path.isfile(path): + raise Exception('The path {path} does not point to a file.'.format(path=path)) + if extension == '.json': + return load_json(path) + elif extension == '.yml' or extension == '.yaml': + return load_yaml(path) + else: + raise Exception('Unrecognized extension: {ext}'.format(ext=extension)) def config_get(path, config_path): - config = load_data(path) - for key in config_path: - config = config.get(key) - return config + config = load_data(path) + for key in config_path: + 
config = config.get(key) + return config def copy_tree(from_path, to_path): - from shutil import copy - for abs_prefix, dirs, files in os.walk(from_path): - prefix = os.path.normpath(os.path.relpath(abs_prefix, from_path)) - for path in dirs: - full_path = os.path.join(to_path, prefix, path) - if os.path.exists(full_path): - if os.path.isdir(full_path): - continue - else: - os.remove(full_path) - os.mkdir(full_path) - for path in files: - copy(os.path.join(abs_prefix, path), - os.path.join(to_path, prefix, path)) + from shutil import copy + for abs_prefix, dirs, files in os.walk(from_path): + prefix = os.path.normpath(os.path.relpath(abs_prefix, from_path)) + for path in dirs: + full_path = os.path.join(to_path, prefix, path) + if os.path.exists(full_path): + if os.path.isdir(full_path): + continue + else: + os.remove(full_path) + os.mkdir(full_path) + for path in files: + copy(os.path.join(abs_prefix, path), + os.path.join(to_path, prefix, path)) diff --git a/bootstrapvz/plugins/admin_user/__init__.py b/bootstrapvz/plugins/admin_user/__init__.py index 7202f2e..4b81c56 100644 --- a/bootstrapvz/plugins/admin_user/__init__.py +++ b/bootstrapvz/plugins/admin_user/__init__.py @@ -1,37 +1,37 @@ def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) - pubkey = data['plugins']['admin_user'].get('pubkey', None) - if pubkey is not None and not os.path.exists(pubkey): - msg = 'Could not find public key at %s' % pubkey - error(msg, ['plugins', 'admin_user', 'pubkey']) + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) + pubkey = data['plugins']['admin_user'].get('pubkey', None) + if pubkey is not None and not os.path.exists(pubkey): + msg = 'Could not find public key at %s' % pubkey + error(msg, ['plugins', 'admin_user', 'pubkey']) def 
resolve_tasks(taskset, manifest): - import logging - import tasks - from bootstrapvz.common.tasks import ssh + import logging + import tasks + from bootstrapvz.common.tasks import ssh - from bootstrapvz.common.releases import jessie - if manifest.release < jessie: - taskset.update([ssh.DisableRootLogin]) + from bootstrapvz.common.releases import jessie + if manifest.release < jessie: + taskset.update([ssh.DisableRootLogin]) - if 'password' in manifest.plugins['admin_user']: - taskset.discard(ssh.DisableSSHPasswordAuthentication) - taskset.add(tasks.AdminUserPassword) + if 'password' in manifest.plugins['admin_user']: + taskset.discard(ssh.DisableSSHPasswordAuthentication) + taskset.add(tasks.AdminUserPassword) - if 'pubkey' in manifest.plugins['admin_user']: - taskset.add(tasks.AdminUserPublicKey) - elif manifest.provider['name'] == 'ec2': - logging.getLogger(__name__).info("The SSH key will be obtained from EC2") - taskset.add(tasks.AdminUserPublicKeyEC2) - elif 'password' not in manifest.plugins['admin_user']: - logging.getLogger(__name__).warn("No SSH key and no password set") + if 'pubkey' in manifest.plugins['admin_user']: + taskset.add(tasks.AdminUserPublicKey) + elif manifest.provider['name'] == 'ec2': + logging.getLogger(__name__).info("The SSH key will be obtained from EC2") + taskset.add(tasks.AdminUserPublicKeyEC2) + elif 'password' not in manifest.plugins['admin_user']: + logging.getLogger(__name__).warn("No SSH key and no password set") - taskset.update([tasks.AddSudoPackage, - tasks.CreateAdminUser, - tasks.PasswordlessSudo, - ]) + taskset.update([tasks.AddSudoPackage, + tasks.CreateAdminUser, + tasks.PasswordlessSudo, + ]) diff --git a/bootstrapvz/plugins/admin_user/tasks.py b/bootstrapvz/plugins/admin_user/tasks.py index abfa0e1..583e0e5 100644 --- a/bootstrapvz/plugins/admin_user/tasks.py +++ b/bootstrapvz/plugins/admin_user/tasks.py @@ -9,104 +9,104 @@ log = logging.getLogger(__name__) class AddSudoPackage(Task): - description = 'Adding `sudo\' to 
the image packages' - phase = phases.preparation + description = 'Adding `sudo\' to the image packages' + phase = phases.preparation - @classmethod - def run(cls, info): - info.packages.add('sudo') + @classmethod + def run(cls, info): + info.packages.add('sudo') class CreateAdminUser(Task): - description = 'Creating the admin user' - phase = phases.system_modification + description = 'Creating the admin user' + phase = phases.system_modification - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import log_check_call - log_check_call(['chroot', info.root, - 'useradd', - '--create-home', '--shell', '/bin/bash', - info.manifest.plugins['admin_user']['username']]) + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import log_check_call + log_check_call(['chroot', info.root, + 'useradd', + '--create-home', '--shell', '/bin/bash', + info.manifest.plugins['admin_user']['username']]) class PasswordlessSudo(Task): - description = 'Allowing the admin user to use sudo without a password' - phase = phases.system_modification + description = 'Allowing the admin user to use sudo without a password' + phase = phases.system_modification - @classmethod - def run(cls, info): - sudo_admin_path = os.path.join(info.root, 'etc/sudoers.d/99_admin') - username = info.manifest.plugins['admin_user']['username'] - with open(sudo_admin_path, 'w') as sudo_admin: - sudo_admin.write('{username} ALL=(ALL) NOPASSWD:ALL'.format(username=username)) - import stat - ug_read_only = (stat.S_IRUSR | stat.S_IRGRP) - os.chmod(sudo_admin_path, ug_read_only) + @classmethod + def run(cls, info): + sudo_admin_path = os.path.join(info.root, 'etc/sudoers.d/99_admin') + username = info.manifest.plugins['admin_user']['username'] + with open(sudo_admin_path, 'w') as sudo_admin: + sudo_admin.write('{username} ALL=(ALL) NOPASSWD:ALL'.format(username=username)) + import stat + ug_read_only = (stat.S_IRUSR | stat.S_IRGRP) + os.chmod(sudo_admin_path, ug_read_only) class 
AdminUserPassword(Task): - description = 'Setting the admin user password' - phase = phases.system_modification - predecessors = [InstallInitScripts, CreateAdminUser] + description = 'Setting the admin user password' + phase = phases.system_modification + predecessors = [InstallInitScripts, CreateAdminUser] - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import log_check_call - log_check_call(['chroot', info.root, 'chpasswd'], - info.manifest.plugins['admin_user']['username'] + - ':' + info.manifest.plugins['admin_user']['password']) + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import log_check_call + log_check_call(['chroot', info.root, 'chpasswd'], + info.manifest.plugins['admin_user']['username'] + + ':' + info.manifest.plugins['admin_user']['password']) class AdminUserPublicKey(Task): - description = 'Installing the public key for the admin user' - phase = phases.system_modification - predecessors = [AddEC2InitScripts, CreateAdminUser] - successors = [InstallInitScripts] + description = 'Installing the public key for the admin user' + phase = phases.system_modification + predecessors = [AddEC2InitScripts, CreateAdminUser] + successors = [InstallInitScripts] - @classmethod - def run(cls, info): - if 'ec2-get-credentials' in info.initd['install']: - log.warn('You are using a static public key for the admin account.' - 'This will conflict with the ec2 public key injection mechanism.' - 'The ec2-get-credentials startup script will therefore not be enabled.') - del info.initd['install']['ec2-get-credentials'] + @classmethod + def run(cls, info): + if 'ec2-get-credentials' in info.initd['install']: + log.warn('You are using a static public key for the admin account.' + 'This will conflict with the ec2 public key injection mechanism.' 
+ 'The ec2-get-credentials startup script will therefore not be enabled.') + del info.initd['install']['ec2-get-credentials'] - # Get the stuff we need (username & public key) - username = info.manifest.plugins['admin_user']['username'] - with open(info.manifest.plugins['admin_user']['pubkey']) as pubkey_handle: - pubkey = pubkey_handle.read() + # Get the stuff we need (username & public key) + username = info.manifest.plugins['admin_user']['username'] + with open(info.manifest.plugins['admin_user']['pubkey']) as pubkey_handle: + pubkey = pubkey_handle.read() - # paths - ssh_dir_rel = os.path.join('home', username, '.ssh') - auth_keys_rel = os.path.join(ssh_dir_rel, 'authorized_keys') - ssh_dir_abs = os.path.join(info.root, ssh_dir_rel) - auth_keys_abs = os.path.join(info.root, auth_keys_rel) - # Create the ssh dir if nobody has created it yet - if not os.path.exists(ssh_dir_abs): - os.mkdir(ssh_dir_abs, 0700) + # paths + ssh_dir_rel = os.path.join('home', username, '.ssh') + auth_keys_rel = os.path.join(ssh_dir_rel, 'authorized_keys') + ssh_dir_abs = os.path.join(info.root, ssh_dir_rel) + auth_keys_abs = os.path.join(info.root, auth_keys_rel) + # Create the ssh dir if nobody has created it yet + if not os.path.exists(ssh_dir_abs): + os.mkdir(ssh_dir_abs, 0700) - # Create (or append to) the authorized keys file (and chmod u=rw,go=) - import stat - with open(auth_keys_abs, 'a') as auth_keys_handle: - auth_keys_handle.write(pubkey + '\n') - os.chmod(auth_keys_abs, (stat.S_IRUSR | stat.S_IWUSR)) + # Create (or append to) the authorized keys file (and chmod u=rw,go=) + import stat + with open(auth_keys_abs, 'a') as auth_keys_handle: + auth_keys_handle.write(pubkey + '\n') + os.chmod(auth_keys_abs, (stat.S_IRUSR | stat.S_IWUSR)) - # Set the owner of the authorized keys file - # (must be through chroot, the host system doesn't know about the user) - from bootstrapvz.common.tools import log_check_call - log_check_call(['chroot', info.root, - 'chown', '-R', (username + ':' 
+ username), ssh_dir_rel]) + # Set the owner of the authorized keys file + # (must be through chroot, the host system doesn't know about the user) + from bootstrapvz.common.tools import log_check_call + log_check_call(['chroot', info.root, + 'chown', '-R', (username + ':' + username), ssh_dir_rel]) class AdminUserPublicKeyEC2(Task): - description = 'Modifying ec2-get-credentials to copy the ssh public key to the admin user' - phase = phases.system_modification - predecessors = [InstallInitScripts, CreateAdminUser] + description = 'Modifying ec2-get-credentials to copy the ssh public key to the admin user' + phase = phases.system_modification + predecessors = [InstallInitScripts, CreateAdminUser] - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import sed_i - getcreds_path = os.path.join(info.root, 'etc/init.d/ec2-get-credentials') - username = info.manifest.plugins['admin_user']['username'] - sed_i(getcreds_path, "username='root'", "username='{username}'".format(username=username)) + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import sed_i + getcreds_path = os.path.join(info.root, 'etc/init.d/ec2-get-credentials') + username = info.manifest.plugins['admin_user']['username'] + sed_i(getcreds_path, "username='root'", "username='{username}'".format(username=username)) diff --git a/bootstrapvz/plugins/apt_proxy/__init__.py b/bootstrapvz/plugins/apt_proxy/__init__.py index 132c679..4f07704 100644 --- a/bootstrapvz/plugins/apt_proxy/__init__.py +++ b/bootstrapvz/plugins/apt_proxy/__init__.py @@ -1,12 +1,12 @@ def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - import tasks - taskset.add(tasks.CheckAptProxy) - 
taskset.add(tasks.SetAptProxy) - if not manifest.plugins['apt_proxy'].get('persistent', False): - taskset.add(tasks.RemoveAptProxy) + import tasks + taskset.add(tasks.CheckAptProxy) + taskset.add(tasks.SetAptProxy) + if not manifest.plugins['apt_proxy'].get('persistent', False): + taskset.add(tasks.RemoveAptProxy) diff --git a/bootstrapvz/plugins/apt_proxy/tasks.py b/bootstrapvz/plugins/apt_proxy/tasks.py index c813ec4..015fb69 100644 --- a/bootstrapvz/plugins/apt_proxy/tasks.py +++ b/bootstrapvz/plugins/apt_proxy/tasks.py @@ -6,55 +6,55 @@ import urllib2 class CheckAptProxy(Task): - description = 'Checking reachability of APT proxy server' - phase = phases.preparation + description = 'Checking reachability of APT proxy server' + phase = phases.preparation - @classmethod - def run(cls, info): - proxy_address = info.manifest.plugins['apt_proxy']['address'] - proxy_port = info.manifest.plugins['apt_proxy']['port'] - proxy_url = 'http://{address}:{port}'.format(address=proxy_address, port=proxy_port) - try: - urllib2.urlopen(proxy_url, timeout=5) - except Exception as e: - # Default response from `apt-cacher-ng` - if isinstance(e, urllib2.HTTPError) and e.code in [404, 406] and e.msg == 'Usage Information': - pass - else: - import logging - log = logging.getLogger(__name__) - log.warning('The APT proxy server couldn\'t be reached. `apt-get\' commands may fail.') + @classmethod + def run(cls, info): + proxy_address = info.manifest.plugins['apt_proxy']['address'] + proxy_port = info.manifest.plugins['apt_proxy']['port'] + proxy_url = 'http://{address}:{port}'.format(address=proxy_address, port=proxy_port) + try: + urllib2.urlopen(proxy_url, timeout=5) + except Exception as e: + # Default response from `apt-cacher-ng` + if isinstance(e, urllib2.HTTPError) and e.code in [404, 406] and e.msg == 'Usage Information': + pass + else: + import logging + log = logging.getLogger(__name__) + log.warning('The APT proxy server couldn\'t be reached. 
`apt-get\' commands may fail.') class SetAptProxy(Task): - description = 'Setting proxy for APT' - phase = phases.package_installation - successors = [apt.AptUpdate] + description = 'Setting proxy for APT' + phase = phases.package_installation + successors = [apt.AptUpdate] - @classmethod - def run(cls, info): - proxy_path = os.path.join(info.root, 'etc/apt/apt.conf.d/02proxy') - proxy_username = info.manifest.plugins['apt_proxy'].get('username') - proxy_password = info.manifest.plugins['apt_proxy'].get('password') - proxy_address = info.manifest.plugins['apt_proxy']['address'] - proxy_port = info.manifest.plugins['apt_proxy']['port'] + @classmethod + def run(cls, info): + proxy_path = os.path.join(info.root, 'etc/apt/apt.conf.d/02proxy') + proxy_username = info.manifest.plugins['apt_proxy'].get('username') + proxy_password = info.manifest.plugins['apt_proxy'].get('password') + proxy_address = info.manifest.plugins['apt_proxy']['address'] + proxy_port = info.manifest.plugins['apt_proxy']['port'] - if None not in (proxy_username, proxy_password): - proxy_auth = '{username}:{password}@'.format( - username=proxy_username, password=proxy_password) - else: - proxy_auth = '' + if None not in (proxy_username, proxy_password): + proxy_auth = '{username}:{password}@'.format( + username=proxy_username, password=proxy_password) + else: + proxy_auth = '' - with open(proxy_path, 'w') as proxy_file: - proxy_file.write( - 'Acquire::http {{ Proxy "http://{auth}{address}:{port}"; }};\n' - .format(auth=proxy_auth, address=proxy_address, port=proxy_port)) + with open(proxy_path, 'w') as proxy_file: + proxy_file.write( + 'Acquire::http {{ Proxy "http://{auth}{address}:{port}"; }};\n' + .format(auth=proxy_auth, address=proxy_address, port=proxy_port)) class RemoveAptProxy(Task): - description = 'Removing APT proxy configuration file' - phase = phases.system_cleaning + description = 'Removing APT proxy configuration file' + phase = phases.system_cleaning - @classmethod - def run(cls, 
info): - os.remove(os.path.join(info.root, 'etc/apt/apt.conf.d/02proxy')) + @classmethod + def run(cls, info): + os.remove(os.path.join(info.root, 'etc/apt/apt.conf.d/02proxy')) diff --git a/bootstrapvz/plugins/chef/__init__.py b/bootstrapvz/plugins/chef/__init__.py index 5716b20..e415822 100644 --- a/bootstrapvz/plugins/chef/__init__.py +++ b/bootstrapvz/plugins/chef/__init__.py @@ -2,13 +2,13 @@ import tasks def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - taskset.add(tasks.AddPackages) - if 'assets' in manifest.plugins['chef']: - taskset.add(tasks.CheckAssetsPath) - taskset.add(tasks.CopyChefAssets) + taskset.add(tasks.AddPackages) + if 'assets' in manifest.plugins['chef']: + taskset.add(tasks.CheckAssetsPath) + taskset.add(tasks.CopyChefAssets) diff --git a/bootstrapvz/plugins/chef/tasks.py b/bootstrapvz/plugins/chef/tasks.py index 2c02adf..649c287 100644 --- a/bootstrapvz/plugins/chef/tasks.py +++ b/bootstrapvz/plugins/chef/tasks.py @@ -4,35 +4,35 @@ import os class CheckAssetsPath(Task): - description = 'Checking whether the assets path exist' - phase = phases.preparation + description = 'Checking whether the assets path exist' + phase = phases.preparation - @classmethod - def run(cls, info): - from bootstrapvz.common.exceptions import TaskError - assets = info.manifest.plugins['chef']['assets'] - if not os.path.exists(assets): - msg = 'The assets directory {assets} does not exist.'.format(assets=assets) - raise TaskError(msg) - if not os.path.isdir(assets): - msg = 'The assets path {assets} does not point to a directory.'.format(assets=assets) - raise TaskError(msg) + @classmethod + def run(cls, info): + from 
bootstrapvz.common.exceptions import TaskError + assets = info.manifest.plugins['chef']['assets'] + if not os.path.exists(assets): + msg = 'The assets directory {assets} does not exist.'.format(assets=assets) + raise TaskError(msg) + if not os.path.isdir(assets): + msg = 'The assets path {assets} does not point to a directory.'.format(assets=assets) + raise TaskError(msg) class AddPackages(Task): - description = 'Add chef package' - phase = phases.preparation + description = 'Add chef package' + phase = phases.preparation - @classmethod - def run(cls, info): - info.packages.add('chef') + @classmethod + def run(cls, info): + info.packages.add('chef') class CopyChefAssets(Task): - description = 'Copying chef assets' - phase = phases.system_modification + description = 'Copying chef assets' + phase = phases.system_modification - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import copy_tree - copy_tree(info.manifest.plugins['chef']['assets'], os.path.join(info.root, 'etc/chef')) + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import copy_tree + copy_tree(info.manifest.plugins['chef']['assets'], os.path.join(info.root, 'etc/chef')) diff --git a/bootstrapvz/plugins/cloud_init/__init__.py b/bootstrapvz/plugins/cloud_init/__init__.py index d3c8a33..95fa97f 100644 --- a/bootstrapvz/plugins/cloud_init/__init__.py +++ b/bootstrapvz/plugins/cloud_init/__init__.py @@ -1,36 +1,36 @@ def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - import tasks - import bootstrapvz.providers.ec2.tasks.initd as initd_ec2 - from bootstrapvz.common.tasks import apt - from bootstrapvz.common.tasks import initd - from 
bootstrapvz.common.tasks import ssh + import tasks + import bootstrapvz.providers.ec2.tasks.initd as initd_ec2 + from bootstrapvz.common.tasks import apt + from bootstrapvz.common.tasks import initd + from bootstrapvz.common.tasks import ssh - from bootstrapvz.common.releases import wheezy - if manifest.release == wheezy: - taskset.add(apt.AddBackports) + from bootstrapvz.common.releases import wheezy + if manifest.release == wheezy: + taskset.add(apt.AddBackports) - taskset.update([tasks.SetMetadataSource, - tasks.AddCloudInitPackages, - ]) + taskset.update([tasks.SetMetadataSource, + tasks.AddCloudInitPackages, + ]) - options = manifest.plugins['cloud_init'] - if 'username' in options: - taskset.add(tasks.SetUsername) - if 'groups' in options and len(options['groups']): - taskset.add(tasks.SetGroups) - if 'disable_modules' in options: - taskset.add(tasks.DisableModules) + options = manifest.plugins['cloud_init'] + if 'username' in options: + taskset.add(tasks.SetUsername) + if 'groups' in options and len(options['groups']): + taskset.add(tasks.SetGroups) + if 'disable_modules' in options: + taskset.add(tasks.DisableModules) - taskset.discard(initd_ec2.AddEC2InitScripts) - taskset.discard(initd.AddExpandRoot) - taskset.discard(initd.AdjustExpandRootScript) - taskset.discard(initd.AdjustGrowpartWorkaround) - taskset.discard(ssh.AddSSHKeyGeneration) + taskset.discard(initd_ec2.AddEC2InitScripts) + taskset.discard(initd.AddExpandRoot) + taskset.discard(initd.AdjustExpandRootScript) + taskset.discard(initd.AdjustGrowpartWorkaround) + taskset.discard(ssh.AddSSHKeyGeneration) diff --git a/bootstrapvz/plugins/cloud_init/tasks.py b/bootstrapvz/plugins/cloud_init/tasks.py index b25a6c1..2d1aee4 100644 --- a/bootstrapvz/plugins/cloud_init/tasks.py +++ b/bootstrapvz/plugins/cloud_init/tasks.py @@ -8,92 +8,92 @@ import os.path class AddCloudInitPackages(Task): - description = 'Adding cloud-init package and sudo' - phase = phases.preparation - predecessors = [apt.AddBackports] 
+ description = 'Adding cloud-init package and sudo' + phase = phases.preparation + predecessors = [apt.AddBackports] - @classmethod - def run(cls, info): - target = None - from bootstrapvz.common.releases import wheezy - if info.manifest.release == wheezy: - target = '{system.release}-backports' - info.packages.add('cloud-init', target) - info.packages.add('sudo') + @classmethod + def run(cls, info): + target = None + from bootstrapvz.common.releases import wheezy + if info.manifest.release == wheezy: + target = '{system.release}-backports' + info.packages.add('cloud-init', target) + info.packages.add('sudo') class SetUsername(Task): - description = 'Setting username in cloud.cfg' - phase = phases.system_modification + description = 'Setting username in cloud.cfg' + phase = phases.system_modification - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import sed_i - cloud_cfg = os.path.join(info.root, 'etc/cloud/cloud.cfg') - username = info.manifest.plugins['cloud_init']['username'] - search = '^ name: debian$' - replace = (' name: {username}\n' - ' sudo: ALL=(ALL) NOPASSWD:ALL\n' - ' shell: /bin/bash').format(username=username) - sed_i(cloud_cfg, search, replace) + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import sed_i + cloud_cfg = os.path.join(info.root, 'etc/cloud/cloud.cfg') + username = info.manifest.plugins['cloud_init']['username'] + search = '^ name: debian$' + replace = (' name: {username}\n' + ' sudo: ALL=(ALL) NOPASSWD:ALL\n' + ' shell: /bin/bash').format(username=username) + sed_i(cloud_cfg, search, replace) class SetGroups(Task): - description = 'Setting groups in cloud.cfg' - phase = phases.system_modification + description = 'Setting groups in cloud.cfg' + phase = phases.system_modification - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import sed_i - cloud_cfg = os.path.join(info.root, 'etc/cloud/cloud.cfg') - groups = info.manifest.plugins['cloud_init']['groups'] - search = ('^ 
groups: \[adm, audio, cdrom, dialout, floppy, video,' - ' plugdev, dip\]$') - replace = (' groups: [adm, audio, cdrom, dialout, floppy, video,' - ' plugdev, dip, {groups}]').format(groups=', '.join(groups)) - sed_i(cloud_cfg, search, replace) + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import sed_i + cloud_cfg = os.path.join(info.root, 'etc/cloud/cloud.cfg') + groups = info.manifest.plugins['cloud_init']['groups'] + search = ('^ groups: \[adm, audio, cdrom, dialout, floppy, video,' + ' plugdev, dip\]$') + replace = (' groups: [adm, audio, cdrom, dialout, floppy, video,' + ' plugdev, dip, {groups}]').format(groups=', '.join(groups)) + sed_i(cloud_cfg, search, replace) class SetMetadataSource(Task): - description = 'Setting metadata source' - phase = phases.package_installation - predecessors = [locale.GenerateLocale] - successors = [apt.AptUpdate] + description = 'Setting metadata source' + phase = phases.package_installation + predecessors = [locale.GenerateLocale] + successors = [apt.AptUpdate] - @classmethod - def run(cls, info): - if 'metadata_sources' in info.manifest.plugins['cloud_init']: - sources = info.manifest.plugins['cloud_init']['metadata_sources'] - else: - source_mapping = {'ec2': 'Ec2'} - sources = source_mapping.get(info.manifest.provider['name'], None) - if sources is None: - msg = ('No cloud-init metadata source mapping found for provider `{provider}\', ' - 'skipping selections setting.').format(provider=info.manifest.provider['name']) - logging.getLogger(__name__).warn(msg) - return - sources = "cloud-init cloud-init/datasources multiselect " + sources - log_check_call(['chroot', info.root, 'debconf-set-selections'], sources) + @classmethod + def run(cls, info): + if 'metadata_sources' in info.manifest.plugins['cloud_init']: + sources = info.manifest.plugins['cloud_init']['metadata_sources'] + else: + source_mapping = {'ec2': 'Ec2'} + sources = source_mapping.get(info.manifest.provider['name'], None) + if sources is 
None: + msg = ('No cloud-init metadata source mapping found for provider `{provider}\', ' + 'skipping selections setting.').format(provider=info.manifest.provider['name']) + logging.getLogger(__name__).warn(msg) + return + sources = "cloud-init cloud-init/datasources multiselect " + sources + log_check_call(['chroot', info.root, 'debconf-set-selections'], sources) class DisableModules(Task): - description = 'Setting cloud.cfg modules' - phase = phases.system_modification + description = 'Setting cloud.cfg modules' + phase = phases.system_modification - @classmethod - def run(cls, info): - import re - patterns = "" - for pattern in info.manifest.plugins['cloud_init']['disable_modules']: - if patterns != "": - patterns = patterns + "|" + pattern - else: - patterns = "^\s+-\s+(" + pattern - patterns = patterns + ")$" - regex = re.compile(patterns) + @classmethod + def run(cls, info): + import re + patterns = "" + for pattern in info.manifest.plugins['cloud_init']['disable_modules']: + if patterns != "": + patterns = patterns + "|" + pattern + else: + patterns = "^\s+-\s+(" + pattern + patterns = patterns + ")$" + regex = re.compile(patterns) - cloud_cfg = os.path.join(info.root, 'etc/cloud/cloud.cfg') - import fileinput - for line in fileinput.input(files=cloud_cfg, inplace=True): - if not regex.match(line): - print line, + cloud_cfg = os.path.join(info.root, 'etc/cloud/cloud.cfg') + import fileinput + for line in fileinput.input(files=cloud_cfg, inplace=True): + if not regex.match(line): + print line, diff --git a/bootstrapvz/plugins/commands/__init__.py b/bootstrapvz/plugins/commands/__init__.py index 4d1600f..7c21689 100644 --- a/bootstrapvz/plugins/commands/__init__.py +++ b/bootstrapvz/plugins/commands/__init__.py @@ -1,11 +1,11 @@ def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = 
os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - from tasks import ImageExecuteCommand - taskset.add(ImageExecuteCommand) + from tasks import ImageExecuteCommand + taskset.add(ImageExecuteCommand) diff --git a/bootstrapvz/plugins/commands/tasks.py b/bootstrapvz/plugins/commands/tasks.py index 2dd6eda..0482778 100644 --- a/bootstrapvz/plugins/commands/tasks.py +++ b/bootstrapvz/plugins/commands/tasks.py @@ -3,13 +3,13 @@ from bootstrapvz.common import phases class ImageExecuteCommand(Task): - description = 'Executing commands in the image' - phase = phases.user_modification + description = 'Executing commands in the image' + phase = phases.user_modification - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import log_check_call - for raw_command in info.manifest.plugins['commands']['commands']: - command = map(lambda part: part.format(root=info.root, **info.manifest_vars), raw_command) - shell = len(command) == 1 - log_check_call(command, shell=shell) + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import log_check_call + for raw_command in info.manifest.plugins['commands']['commands']: + command = map(lambda part: part.format(root=info.root, **info.manifest_vars), raw_command) + shell = len(command) == 1 + log_check_call(command, shell=shell) diff --git a/bootstrapvz/plugins/docker_daemon/__init__.py b/bootstrapvz/plugins/docker_daemon/__init__.py index 11a314b..c2c3a0c 100644 --- a/bootstrapvz/plugins/docker_daemon/__init__.py +++ b/bootstrapvz/plugins/docker_daemon/__init__.py @@ -5,23 +5,23 @@ from bootstrapvz.common.releases import wheezy def validate_manifest(data, validator, error): - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) - from bootstrapvz.common.releases import get_release - if get_release(data['system']['release']) == wheezy: - # 
prefs is a generator of apt preferences across files in the manifest - prefs = (item for vals in data.get('packages', {}).get('preferences', {}).values() for item in vals) - if not any('linux-image' in item['package'] and 'wheezy-backports' in item['pin'] for item in prefs): - msg = 'The backports kernel is required for the docker daemon to function properly' - error(msg, ['packages', 'preferences']) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) + from bootstrapvz.common.releases import get_release + if get_release(data['system']['release']) == wheezy: + # prefs is a generator of apt preferences across files in the manifest + prefs = (item for vals in data.get('packages', {}).get('preferences', {}).values() for item in vals) + if not any('linux-image' in item['package'] and 'wheezy-backports' in item['pin'] for item in prefs): + msg = 'The backports kernel is required for the docker daemon to function properly' + error(msg, ['packages', 'preferences']) def resolve_tasks(taskset, manifest): - if manifest.release == wheezy: - taskset.add(apt.AddBackports) - taskset.add(tasks.AddDockerDeps) - taskset.add(tasks.AddDockerBinary) - taskset.add(tasks.AddDockerInit) - taskset.add(tasks.EnableMemoryCgroup) - if len(manifest.plugins['docker_daemon'].get('pull_images', [])) > 0: - taskset.add(tasks.PullDockerImages) + if manifest.release == wheezy: + taskset.add(apt.AddBackports) + taskset.add(tasks.AddDockerDeps) + taskset.add(tasks.AddDockerBinary) + taskset.add(tasks.AddDockerInit) + taskset.add(tasks.EnableMemoryCgroup) + if len(manifest.plugins['docker_daemon'].get('pull_images', [])) > 0: + taskset.add(tasks.PullDockerImages) diff --git a/bootstrapvz/plugins/docker_daemon/tasks.py b/bootstrapvz/plugins/docker_daemon/tasks.py index 59bef18..380e5a2 100644 --- a/bootstrapvz/plugins/docker_daemon/tasks.py +++ b/bootstrapvz/plugins/docker_daemon/tasks.py @@ -15,108 +15,108 @@ ASSETS_DIR = 
os.path.normpath(os.path.join(os.path.dirname(__file__), 'assets')) class AddDockerDeps(Task): - description = 'Add packages for docker deps' - phase = phases.package_installation - DOCKER_DEPS = ['aufs-tools', 'btrfs-tools', 'git', 'iptables', - 'procps', 'xz-utils', 'ca-certificates'] + description = 'Add packages for docker deps' + phase = phases.package_installation + DOCKER_DEPS = ['aufs-tools', 'btrfs-tools', 'git', 'iptables', + 'procps', 'xz-utils', 'ca-certificates'] - @classmethod - def run(cls, info): - for pkg in cls.DOCKER_DEPS: - info.packages.add(pkg) + @classmethod + def run(cls, info): + for pkg in cls.DOCKER_DEPS: + info.packages.add(pkg) class AddDockerBinary(Task): - description = 'Add docker binary' - phase = phases.system_modification + description = 'Add docker binary' + phase = phases.system_modification - @classmethod - def run(cls, info): - docker_version = info.manifest.plugins['docker_daemon'].get('version', False) - docker_url = 'https://get.docker.io/builds/Linux/x86_64/docker-' - if docker_version: - docker_url += docker_version - else: - docker_url += 'latest' - bin_docker = os.path.join(info.root, 'usr/bin/docker') - log_check_call(['wget', '-O', bin_docker, docker_url]) - os.chmod(bin_docker, 0755) + @classmethod + def run(cls, info): + docker_version = info.manifest.plugins['docker_daemon'].get('version', False) + docker_url = 'https://get.docker.io/builds/Linux/x86_64/docker-' + if docker_version: + docker_url += docker_version + else: + docker_url += 'latest' + bin_docker = os.path.join(info.root, 'usr/bin/docker') + log_check_call(['wget', '-O', bin_docker, docker_url]) + os.chmod(bin_docker, 0755) class AddDockerInit(Task): - description = 'Add docker init script' - phase = phases.system_modification - successors = [initd.InstallInitScripts] + description = 'Add docker init script' + phase = phases.system_modification + successors = [initd.InstallInitScripts] - @classmethod - def run(cls, info): - init_src = 
os.path.join(ASSETS_DIR, 'init.d/docker') - info.initd['install']['docker'] = init_src - default_src = os.path.join(ASSETS_DIR, 'default/docker') - default_dest = os.path.join(info.root, 'etc/default/docker') - shutil.copy(default_src, default_dest) - docker_opts = info.manifest.plugins['docker_daemon'].get('docker_opts') - if docker_opts: - sed_i(default_dest, r'^#*DOCKER_OPTS=.*$', 'DOCKER_OPTS="%s"' % docker_opts) + @classmethod + def run(cls, info): + init_src = os.path.join(ASSETS_DIR, 'init.d/docker') + info.initd['install']['docker'] = init_src + default_src = os.path.join(ASSETS_DIR, 'default/docker') + default_dest = os.path.join(info.root, 'etc/default/docker') + shutil.copy(default_src, default_dest) + docker_opts = info.manifest.plugins['docker_daemon'].get('docker_opts') + if docker_opts: + sed_i(default_dest, r'^#*DOCKER_OPTS=.*$', 'DOCKER_OPTS="%s"' % docker_opts) class EnableMemoryCgroup(Task): - description = 'Change grub configuration to enable the memory cgroup' - phase = phases.system_modification - successors = [grub.InstallGrub_1_99, grub.InstallGrub_2] - predecessors = [grub.ConfigureGrub, gceboot.ConfigureGrub] + description = 'Change grub configuration to enable the memory cgroup' + phase = phases.system_modification + successors = [grub.InstallGrub_1_99, grub.InstallGrub_2] + predecessors = [grub.ConfigureGrub, gceboot.ConfigureGrub] - @classmethod - def run(cls, info): - grub_config = os.path.join(info.root, 'etc/default/grub') - sed_i(grub_config, r'^(GRUB_CMDLINE_LINUX*=".*)"\s*$', r'\1 cgroup_enable=memory"') + @classmethod + def run(cls, info): + grub_config = os.path.join(info.root, 'etc/default/grub') + sed_i(grub_config, r'^(GRUB_CMDLINE_LINUX*=".*)"\s*$', r'\1 cgroup_enable=memory"') class PullDockerImages(Task): - description = 'Pull docker images' - phase = phases.system_modification - predecessors = [AddDockerBinary] + description = 'Pull docker images' + phase = phases.system_modification + predecessors = [AddDockerBinary] - 
@classmethod - def run(cls, info): - from bootstrapvz.common.exceptions import TaskError - from subprocess import CalledProcessError - images = info.manifest.plugins['docker_daemon'].get('pull_images', []) - retries = info.manifest.plugins['docker_daemon'].get('pull_images_retries', 10) + @classmethod + def run(cls, info): + from bootstrapvz.common.exceptions import TaskError + from subprocess import CalledProcessError + images = info.manifest.plugins['docker_daemon'].get('pull_images', []) + retries = info.manifest.plugins['docker_daemon'].get('pull_images_retries', 10) - bin_docker = os.path.join(info.root, 'usr/bin/docker') - graph_dir = os.path.join(info.root, 'var/lib/docker') - socket = 'unix://' + os.path.join(info.workspace, 'docker.sock') - pidfile = os.path.join(info.workspace, 'docker.pid') + bin_docker = os.path.join(info.root, 'usr/bin/docker') + graph_dir = os.path.join(info.root, 'var/lib/docker') + socket = 'unix://' + os.path.join(info.workspace, 'docker.sock') + pidfile = os.path.join(info.workspace, 'docker.pid') - try: - # start docker daemon temporarly. - daemon = subprocess.Popen([bin_docker, '-d', '--graph', graph_dir, '-H', socket, '-p', pidfile]) - # wait for docker daemon to start. - for _ in range(retries): - try: - log_check_call([bin_docker, '-H', socket, 'version']) - break - except CalledProcessError: - time.sleep(1) - for img in images: - # docker load if tarball. - if img.endswith('.tar.gz') or img.endswith('.tgz'): - cmd = [bin_docker, '-H', socket, 'load', '-i', img] - try: - log_check_call(cmd) - except CalledProcessError as e: - msg = 'error {e} loading docker image {img}.'.format(img=img, e=e) - raise TaskError(msg) - # docker pull if image name. - else: - cmd = [bin_docker, '-H', socket, 'pull', img] - try: - log_check_call(cmd) - except CalledProcessError as e: - msg = 'error {e} pulling docker image {img}.'.format(img=img, e=e) - raise TaskError(msg) - finally: - # shutdown docker daemon. 
- daemon.terminate() - os.remove(os.path.join(info.workspace, 'docker.sock')) + try: + # start docker daemon temporarly. + daemon = subprocess.Popen([bin_docker, '-d', '--graph', graph_dir, '-H', socket, '-p', pidfile]) + # wait for docker daemon to start. + for _ in range(retries): + try: + log_check_call([bin_docker, '-H', socket, 'version']) + break + except CalledProcessError: + time.sleep(1) + for img in images: + # docker load if tarball. + if img.endswith('.tar.gz') or img.endswith('.tgz'): + cmd = [bin_docker, '-H', socket, 'load', '-i', img] + try: + log_check_call(cmd) + except CalledProcessError as e: + msg = 'error {e} loading docker image {img}.'.format(img=img, e=e) + raise TaskError(msg) + # docker pull if image name. + else: + cmd = [bin_docker, '-H', socket, 'pull', img] + try: + log_check_call(cmd) + except CalledProcessError as e: + msg = 'error {e} pulling docker image {img}.'.format(img=img, e=e) + raise TaskError(msg) + finally: + # shutdown docker daemon. + daemon.terminate() + os.remove(os.path.join(info.workspace, 'docker.sock')) diff --git a/bootstrapvz/plugins/ec2_launch/tasks.py b/bootstrapvz/plugins/ec2_launch/tasks.py index 5d4abc5..b64d4c3 100644 --- a/bootstrapvz/plugins/ec2_launch/tasks.py +++ b/bootstrapvz/plugins/ec2_launch/tasks.py @@ -6,13 +6,13 @@ import logging # TODO: Merge with the method available in wip-integration-tests branch def waituntil(predicate, timeout=5, interval=0.05): - import time - threshhold = time.time() + timeout - while time.time() < threshhold: - if predicate(): - return True - time.sleep(interval) - return False + import time + threshhold = time.time() + timeout + while time.time() < threshhold: + if predicate(): + return True + time.sleep(interval) + return False class LaunchEC2Instance(Task): diff --git a/bootstrapvz/plugins/ec2_publish/__init__.py b/bootstrapvz/plugins/ec2_publish/__init__.py index 9e14d49..4fd8292 100644 --- a/bootstrapvz/plugins/ec2_publish/__init__.py +++ 
b/bootstrapvz/plugins/ec2_publish/__init__.py @@ -1,15 +1,15 @@ def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - import tasks - taskset.add(tasks.CopyAmiToRegions) - if 'manifest_url' in manifest.plugins['ec2_publish']: - taskset.add(tasks.PublishAmiManifest) + import tasks + taskset.add(tasks.CopyAmiToRegions) + if 'manifest_url' in manifest.plugins['ec2_publish']: + taskset.add(tasks.PublishAmiManifest) - ami_public = manifest.plugins['ec2_publish'].get('public') - if ami_public: - taskset.add(tasks.PublishAmi) + ami_public = manifest.plugins['ec2_publish'].get('public') + if ami_public: + taskset.add(tasks.PublishAmi) diff --git a/bootstrapvz/plugins/ec2_publish/tasks.py b/bootstrapvz/plugins/ec2_publish/tasks.py index b4d36c2..388c7da 100644 --- a/bootstrapvz/plugins/ec2_publish/tasks.py +++ b/bootstrapvz/plugins/ec2_publish/tasks.py @@ -6,91 +6,91 @@ import logging class CopyAmiToRegions(Task): - description = 'Copy AWS AMI over other regions' - phase = phases.image_registration - predecessors = [ami.RegisterAMI] + description = 'Copy AWS AMI over other regions' + phase = phases.image_registration + predecessors = [ami.RegisterAMI] - @classmethod - def run(cls, info): - source_region = info._ec2['region'] - source_ami = info._ec2['image'] - name = info._ec2['ami_name'] - copy_description = "Copied from %s (%s)" % (source_ami, source_region) + @classmethod + def run(cls, info): + source_region = info._ec2['region'] + source_ami = info._ec2['image'] + name = info._ec2['ami_name'] + copy_description = "Copied from %s (%s)" % (source_ami, source_region) - connect_args = { - 'aws_access_key_id': info.credentials['access-key'], - 
'aws_secret_access_key': info.credentials['secret-key'] - } - if 'security-token' in info.credentials: - connect_args['security_token'] = info.credentials['security-token'] + connect_args = { + 'aws_access_key_id': info.credentials['access-key'], + 'aws_secret_access_key': info.credentials['secret-key'] + } + if 'security-token' in info.credentials: + connect_args['security_token'] = info.credentials['security-token'] - region_amis = {source_region: source_ami} - region_conns = {source_region: info._ec2['connection']} - from boto.ec2 import connect_to_region - regions = info.manifest.plugins['ec2_publish'].get('regions', ()) - for region in regions: - conn = connect_to_region(region, **connect_args) - region_conns[region] = conn - copied_image = conn.copy_image(source_region, source_ami, name=name, description=copy_description) - region_amis[region] = copied_image.image_id - info._ec2['region_amis'] = region_amis - info._ec2['region_conns'] = region_conns + region_amis = {source_region: source_ami} + region_conns = {source_region: info._ec2['connection']} + from boto.ec2 import connect_to_region + regions = info.manifest.plugins['ec2_publish'].get('regions', ()) + for region in regions: + conn = connect_to_region(region, **connect_args) + region_conns[region] = conn + copied_image = conn.copy_image(source_region, source_ami, name=name, description=copy_description) + region_amis[region] = copied_image.image_id + info._ec2['region_amis'] = region_amis + info._ec2['region_conns'] = region_conns class PublishAmiManifest(Task): - description = 'Publish a manifest of generated AMIs' - phase = phases.image_registration - predecessors = [CopyAmiToRegions] + description = 'Publish a manifest of generated AMIs' + phase = phases.image_registration + predecessors = [CopyAmiToRegions] - @classmethod - def run(cls, info): - manifest_url = info.manifest.plugins['ec2_publish']['manifest_url'] + @classmethod + def run(cls, info): + manifest_url = 
info.manifest.plugins['ec2_publish']['manifest_url'] - import json - amis_json = json.dumps(info._ec2['region_amis']) + import json + amis_json = json.dumps(info._ec2['region_amis']) - from urlparse import urlparse - parsed_url = urlparse(manifest_url) - parsed_host = parsed_url.netloc - if not parsed_url.scheme: - with open(parsed_url.path, 'w') as local_out: - local_out.write(amis_json) - elif parsed_host.endswith('amazonaws.com') and 's3' in parsed_host: - region = 'us-east-1' - path = parsed_url.path[1:] - if 's3-' in parsed_host: - loc = parsed_host.find('s3-') + 3 - region = parsed_host[loc:parsed_host.find('.', loc)] + from urlparse import urlparse + parsed_url = urlparse(manifest_url) + parsed_host = parsed_url.netloc + if not parsed_url.scheme: + with open(parsed_url.path, 'w') as local_out: + local_out.write(amis_json) + elif parsed_host.endswith('amazonaws.com') and 's3' in parsed_host: + region = 'us-east-1' + path = parsed_url.path[1:] + if 's3-' in parsed_host: + loc = parsed_host.find('s3-') + 3 + region = parsed_host[loc:parsed_host.find('.', loc)] - if '.s3' in parsed_host: - bucket = parsed_host[:parsed_host.find('.s3')] - else: - bucket, path = path.split('/', 1) + if '.s3' in parsed_host: + bucket = parsed_host[:parsed_host.find('.s3')] + else: + bucket, path = path.split('/', 1) - from boto.s3 import connect_to_region - conn = connect_to_region(region) - key = conn.get_bucket(bucket, validate=False).new_key(path) - headers = {'Content-Type': 'application/json'} - key.set_contents_from_string(amis_json, headers=headers, policy='public-read') + from boto.s3 import connect_to_region + conn = connect_to_region(region) + key = conn.get_bucket(bucket, validate=False).new_key(path) + headers = {'Content-Type': 'application/json'} + key.set_contents_from_string(amis_json, headers=headers, policy='public-read') class PublishAmi(Task): - description = 'Make generated AMIs public' - phase = phases.image_registration - predecessors = [CopyAmiToRegions] + 
description = 'Make generated AMIs public' + phase = phases.image_registration + predecessors = [CopyAmiToRegions] - @classmethod - def run(cls, info): - region_conns = info._ec2['region_conns'] - region_amis = info._ec2['region_amis'] - logger = logging.getLogger(__name__) + @classmethod + def run(cls, info): + region_conns = info._ec2['region_conns'] + region_amis = info._ec2['region_amis'] + logger = logging.getLogger(__name__) - import time - for region, region_ami in region_amis.items(): - conn = region_conns[region] - current_image = conn.get_image(region_ami) - while current_image.state == 'pending': - logger.debug('Waiting for %s in %s (currently: %s)', region_ami, region, current_image.state) - time.sleep(5) - current_image = conn.get_image(region_ami) - conn.modify_image_attribute(region_ami, attribute='launchPermission', operation='add', groups='all') + import time + for region, region_ami in region_amis.items(): + conn = region_conns[region] + current_image = conn.get_image(region_ami) + while current_image.state == 'pending': + logger.debug('Waiting for %s in %s (currently: %s)', region_ami, region, current_image.state) + time.sleep(5) + current_image = conn.get_image(region_ami) + conn.modify_image_attribute(region_ami, attribute='launchPermission', operation='add', groups='all') diff --git a/bootstrapvz/plugins/file_copy/__init__.py b/bootstrapvz/plugins/file_copy/__init__.py index 475cbb6..eec9cb9 100644 --- a/bootstrapvz/plugins/file_copy/__init__.py +++ b/bootstrapvz/plugins/file_copy/__init__.py @@ -2,20 +2,20 @@ import tasks def validate_manifest(data, validator, error): - import os.path + import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) - for i, file_entry in enumerate(data['plugins']['file_copy']['files']): - srcfile = 
file_entry['src'] - if not os.path.isfile(srcfile): - msg = 'The source file %s does not exist.' % srcfile - error(msg, ['plugins', 'file_copy', 'files', i]) + for i, file_entry in enumerate(data['plugins']['file_copy']['files']): + srcfile = file_entry['src'] + if not os.path.isfile(srcfile): + msg = 'The source file %s does not exist.' % srcfile + error(msg, ['plugins', 'file_copy', 'files', i]) def resolve_tasks(taskset, manifest): - if ('mkdirs' in manifest.plugins['file_copy']): - taskset.add(tasks.MkdirCommand) - if ('files' in manifest.plugins['file_copy']): - taskset.add(tasks.FileCopyCommand) + if ('mkdirs' in manifest.plugins['file_copy']): + taskset.add(tasks.MkdirCommand) + if ('files' in manifest.plugins['file_copy']): + taskset.add(tasks.FileCopyCommand) diff --git a/bootstrapvz/plugins/file_copy/tasks.py b/bootstrapvz/plugins/file_copy/tasks.py index dd3e054..cc1a783 100644 --- a/bootstrapvz/plugins/file_copy/tasks.py +++ b/bootstrapvz/plugins/file_copy/tasks.py @@ -6,46 +6,46 @@ import shutil def modify_path(info, path, entry): - from bootstrapvz.common.tools import log_check_call - if 'permissions' in entry: - # We wrap the permissions string in str() in case - # the user specified a numeric bitmask - chmod_command = ['chroot', info.root, 'chmod', str(entry['permissions']), path] - log_check_call(chmod_command) + from bootstrapvz.common.tools import log_check_call + if 'permissions' in entry: + # We wrap the permissions string in str() in case + # the user specified a numeric bitmask + chmod_command = ['chroot', info.root, 'chmod', str(entry['permissions']), path] + log_check_call(chmod_command) - if 'owner' in entry: - chown_command = ['chroot', info.root, 'chown', entry['owner'], path] - log_check_call(chown_command) + if 'owner' in entry: + chown_command = ['chroot', info.root, 'chown', entry['owner'], path] + log_check_call(chown_command) - if 'group' in entry: - chgrp_command = ['chroot', info.root, 'chgrp', entry['group'], path] - 
log_check_call(chgrp_command) + if 'group' in entry: + chgrp_command = ['chroot', info.root, 'chgrp', entry['group'], path] + log_check_call(chgrp_command) class MkdirCommand(Task): - description = 'Creating directories requested by user' - phase = phases.user_modification + description = 'Creating directories requested by user' + phase = phases.user_modification - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import log_check_call + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import log_check_call - for dir_entry in info.manifest.plugins['file_copy']['mkdirs']: - mkdir_command = ['chroot', info.root, 'mkdir', '-p', dir_entry['dir']] - log_check_call(mkdir_command) - modify_path(info, dir_entry['dir'], dir_entry) + for dir_entry in info.manifest.plugins['file_copy']['mkdirs']: + mkdir_command = ['chroot', info.root, 'mkdir', '-p', dir_entry['dir']] + log_check_call(mkdir_command) + modify_path(info, dir_entry['dir'], dir_entry) class FileCopyCommand(Task): - description = 'Copying user specified files into the image' - phase = phases.user_modification - predecessors = [MkdirCommand] + description = 'Copying user specified files into the image' + phase = phases.user_modification + predecessors = [MkdirCommand] - @classmethod - def run(cls, info): - for file_entry in info.manifest.plugins['file_copy']['files']: - # note that we don't use os.path.join because it can't - # handle absolute paths, which 'dst' most likely is. - final_destination = os.path.normpath("%s/%s" % (info.root, file_entry['dst'])) - shutil.copy(file_entry['src'], final_destination) - modify_path(info, file_entry['dst'], file_entry) + @classmethod + def run(cls, info): + for file_entry in info.manifest.plugins['file_copy']['files']: + # note that we don't use os.path.join because it can't + # handle absolute paths, which 'dst' most likely is. 
+ final_destination = os.path.normpath("%s/%s" % (info.root, file_entry['dst'])) + shutil.copy(file_entry['src'], final_destination) + modify_path(info, file_entry['dst'], file_entry) diff --git a/bootstrapvz/plugins/google_cloud_repo/__init__.py b/bootstrapvz/plugins/google_cloud_repo/__init__.py index 74c78cb..d8bad86 100644 --- a/bootstrapvz/plugins/google_cloud_repo/__init__.py +++ b/bootstrapvz/plugins/google_cloud_repo/__init__.py @@ -3,14 +3,14 @@ import os.path def validate_manifest(data, validator, error): - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - taskset.add(tasks.AddGoogleCloudRepoKey) - if manifest.plugins['google_cloud_repo'].get('enable_keyring_repo', False): - taskset.add(tasks.AddGoogleCloudRepoKeyringRepo) - taskset.add(tasks.InstallGoogleCloudRepoKeyringPackage) - if manifest.plugins['google_cloud_repo'].get('cleanup_bootstrap_key', False): - taskset.add(tasks.CleanupBootstrapRepoKey) + taskset.add(tasks.AddGoogleCloudRepoKey) + if manifest.plugins['google_cloud_repo'].get('enable_keyring_repo', False): + taskset.add(tasks.AddGoogleCloudRepoKeyringRepo) + taskset.add(tasks.InstallGoogleCloudRepoKeyringPackage) + if manifest.plugins['google_cloud_repo'].get('cleanup_bootstrap_key', False): + taskset.add(tasks.CleanupBootstrapRepoKey) diff --git a/bootstrapvz/plugins/google_cloud_repo/tasks.py b/bootstrapvz/plugins/google_cloud_repo/tasks.py index 2306e1d..6ecdd6f 100644 --- a/bootstrapvz/plugins/google_cloud_repo/tasks.py +++ b/bootstrapvz/plugins/google_cloud_repo/tasks.py @@ -7,43 +7,43 @@ import os class AddGoogleCloudRepoKey(Task): - description = 'Adding Google Cloud Repo key.' 
- phase = phases.package_installation - predecessors = [apt.InstallTrustedKeys] - successors = [apt.WriteSources] + description = 'Adding Google Cloud Repo key.' + phase = phases.package_installation + predecessors = [apt.InstallTrustedKeys] + successors = [apt.WriteSources] - @classmethod - def run(cls, info): - key_file = os.path.join(info.root, 'google.gpg.key') - log_check_call(['wget', 'https://packages.cloud.google.com/apt/doc/apt-key.gpg', '-O', key_file]) - log_check_call(['chroot', info.root, 'apt-key', 'add', 'google.gpg.key']) - os.remove(key_file) + @classmethod + def run(cls, info): + key_file = os.path.join(info.root, 'google.gpg.key') + log_check_call(['wget', 'https://packages.cloud.google.com/apt/doc/apt-key.gpg', '-O', key_file]) + log_check_call(['chroot', info.root, 'apt-key', 'add', 'google.gpg.key']) + os.remove(key_file) class AddGoogleCloudRepoKeyringRepo(Task): - description = 'Adding Google Cloud keyring repository.' - phase = phases.preparation - predecessors = [apt.AddManifestSources] + description = 'Adding Google Cloud keyring repository.' + phase = phases.preparation + predecessors = [apt.AddManifestSources] - @classmethod - def run(cls, info): - info.source_lists.add('google-cloud', 'deb http://packages.cloud.google.com/apt google-cloud-packages-archive-keyring-{system.release} main') + @classmethod + def run(cls, info): + info.source_lists.add('google-cloud', 'deb http://packages.cloud.google.com/apt google-cloud-packages-archive-keyring-{system.release} main') class InstallGoogleCloudRepoKeyringPackage(Task): - description = 'Installing Google Cloud key package.' - phase = phases.preparation - successors = [packages.AddManifestPackages] + description = 'Installing Google Cloud key package.' 
+ phase = phases.preparation + successors = [packages.AddManifestPackages] - @classmethod - def run(cls, info): - info.packages.add('google-cloud-packages-archive-keyring') + @classmethod + def run(cls, info): + info.packages.add('google-cloud-packages-archive-keyring') class CleanupBootstrapRepoKey(Task): - description = 'Cleaning up bootstrap repo key.' - phase = phases.system_cleaning + description = 'Cleaning up bootstrap repo key.' + phase = phases.system_cleaning - @classmethod - def run(cls, info): - os.remove(os.path.join(info.root, 'etc', 'apt', 'trusted.gpg')) + @classmethod + def run(cls, info): + os.remove(os.path.join(info.root, 'etc', 'apt', 'trusted.gpg')) diff --git a/bootstrapvz/plugins/minimize_size/__init__.py b/bootstrapvz/plugins/minimize_size/__init__.py index 0f009e8..9e22404 100644 --- a/bootstrapvz/plugins/minimize_size/__init__.py +++ b/bootstrapvz/plugins/minimize_size/__init__.py @@ -6,52 +6,52 @@ from bootstrapvz.common.tasks import locale def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.join(os.path.dirname(__file__), 'manifest-schema.yml') - validator(data, schema_path) - if data['plugins']['minimize_size'].get('shrink', False) and data['volume']['backing'] != 'vmdk': - error('Can only shrink vmdk images', ['plugins', 'minimize_size', 'shrink']) + import os.path + schema_path = os.path.join(os.path.dirname(__file__), 'manifest-schema.yml') + validator(data, schema_path) + if data['plugins']['minimize_size'].get('shrink', False) and data['volume']['backing'] != 'vmdk': + error('Can only shrink vmdk images', ['plugins', 'minimize_size', 'shrink']) def resolve_tasks(taskset, manifest): - taskset.update([tasks.mounts.AddFolderMounts, - tasks.mounts.RemoveFolderMounts, - ]) - if manifest.plugins['minimize_size'].get('zerofree', False): - taskset.add(tasks.shrink.AddRequiredCommands) - taskset.add(tasks.shrink.Zerofree) - if manifest.plugins['minimize_size'].get('shrink', False): - 
taskset.add(tasks.shrink.AddRequiredCommands) - taskset.add(tasks.shrink.ShrinkVolume) - if 'apt' in manifest.plugins['minimize_size']: - apt = manifest.plugins['minimize_size']['apt'] - if apt.get('autoclean', False): - taskset.add(tasks.apt.AutomateAptClean) - if 'languages' in apt: - taskset.add(tasks.apt.FilterTranslationFiles) - if apt.get('gzip_indexes', False): - taskset.add(tasks.apt.AptGzipIndexes) - if apt.get('autoremove_suggests', False): - taskset.add(tasks.apt.AptAutoremoveSuggests) - filter_tasks = [tasks.dpkg.CreateDpkgCfg, - tasks.dpkg.InitializeBootstrapFilterList, - tasks.dpkg.CreateBootstrapFilterScripts, - tasks.dpkg.DeleteBootstrapFilterScripts, - ] - if 'dpkg' in manifest.plugins['minimize_size']: - dpkg = manifest.plugins['minimize_size']['dpkg'] - if 'locales' in dpkg: - taskset.update(filter_tasks) - taskset.add(tasks.dpkg.FilterLocales) - # If no locales are selected, we don't need the locale package - if len(dpkg['locales']) == 0: - taskset.discard(locale.LocaleBootstrapPackage) - taskset.discard(locale.GenerateLocale) - if dpkg.get('exclude_docs', False): - taskset.update(filter_tasks) - taskset.add(tasks.dpkg.ExcludeDocs) + taskset.update([tasks.mounts.AddFolderMounts, + tasks.mounts.RemoveFolderMounts, + ]) + if manifest.plugins['minimize_size'].get('zerofree', False): + taskset.add(tasks.shrink.AddRequiredCommands) + taskset.add(tasks.shrink.Zerofree) + if manifest.plugins['minimize_size'].get('shrink', False): + taskset.add(tasks.shrink.AddRequiredCommands) + taskset.add(tasks.shrink.ShrinkVolume) + if 'apt' in manifest.plugins['minimize_size']: + apt = manifest.plugins['minimize_size']['apt'] + if apt.get('autoclean', False): + taskset.add(tasks.apt.AutomateAptClean) + if 'languages' in apt: + taskset.add(tasks.apt.FilterTranslationFiles) + if apt.get('gzip_indexes', False): + taskset.add(tasks.apt.AptGzipIndexes) + if apt.get('autoremove_suggests', False): + taskset.add(tasks.apt.AptAutoremoveSuggests) + filter_tasks = 
[tasks.dpkg.CreateDpkgCfg, + tasks.dpkg.InitializeBootstrapFilterList, + tasks.dpkg.CreateBootstrapFilterScripts, + tasks.dpkg.DeleteBootstrapFilterScripts, + ] + if 'dpkg' in manifest.plugins['minimize_size']: + dpkg = manifest.plugins['minimize_size']['dpkg'] + if 'locales' in dpkg: + taskset.update(filter_tasks) + taskset.add(tasks.dpkg.FilterLocales) + # If no locales are selected, we don't need the locale package + if len(dpkg['locales']) == 0: + taskset.discard(locale.LocaleBootstrapPackage) + taskset.discard(locale.GenerateLocale) + if dpkg.get('exclude_docs', False): + taskset.update(filter_tasks) + taskset.add(tasks.dpkg.ExcludeDocs) def resolve_rollback_tasks(taskset, manifest, completed, counter_task): - counter_task(taskset, tasks.mounts.AddFolderMounts, tasks.mounts.RemoveFolderMounts) - counter_task(taskset, tasks.dpkg.CreateBootstrapFilterScripts, tasks.dpkg.DeleteBootstrapFilterScripts) + counter_task(taskset, tasks.mounts.AddFolderMounts, tasks.mounts.RemoveFolderMounts) + counter_task(taskset, tasks.dpkg.CreateBootstrapFilterScripts, tasks.dpkg.DeleteBootstrapFilterScripts) diff --git a/bootstrapvz/plugins/minimize_size/tasks/apt.py b/bootstrapvz/plugins/minimize_size/tasks/apt.py index b5b0ee2..6ff33c9 100644 --- a/bootstrapvz/plugins/minimize_size/tasks/apt.py +++ b/bootstrapvz/plugins/minimize_size/tasks/apt.py @@ -8,55 +8,55 @@ from . 
import assets class AutomateAptClean(Task): - description = 'Configuring apt to always clean everything out when it\'s done' - phase = phases.package_installation - successors = [apt.AptUpdate] - # Snatched from: - # https://github.com/docker/docker/blob/1d775a54cc67e27f755c7338c3ee938498e845d7/contrib/mkimage/debootstrap + description = 'Configuring apt to always clean everything out when it\'s done' + phase = phases.package_installation + successors = [apt.AptUpdate] + # Snatched from: + # https://github.com/docker/docker/blob/1d775a54cc67e27f755c7338c3ee938498e845d7/contrib/mkimage/debootstrap - @classmethod - def run(cls, info): - shutil.copy(os.path.join(assets, 'apt-clean'), - os.path.join(info.root, 'etc/apt/apt.conf.d/90clean')) + @classmethod + def run(cls, info): + shutil.copy(os.path.join(assets, 'apt-clean'), + os.path.join(info.root, 'etc/apt/apt.conf.d/90clean')) class FilterTranslationFiles(Task): - description = 'Configuring apt to only download and use specific translation files' - phase = phases.package_installation - successors = [apt.AptUpdate] - # Snatched from: - # https://github.com/docker/docker/blob/1d775a54cc67e27f755c7338c3ee938498e845d7/contrib/mkimage/debootstrap + description = 'Configuring apt to only download and use specific translation files' + phase = phases.package_installation + successors = [apt.AptUpdate] + # Snatched from: + # https://github.com/docker/docker/blob/1d775a54cc67e27f755c7338c3ee938498e845d7/contrib/mkimage/debootstrap - @classmethod - def run(cls, info): - langs = info.manifest.plugins['minimize_size']['apt']['languages'] - config = '; '.join(map(lambda l: '"' + l + '"', langs)) - config_path = os.path.join(info.root, 'etc/apt/apt.conf.d/20languages') - shutil.copy(os.path.join(assets, 'apt-languages'), config_path) - sed_i(config_path, r'ACQUIRE_LANGUAGES_FILTER', config) + @classmethod + def run(cls, info): + langs = info.manifest.plugins['minimize_size']['apt']['languages'] + config = '; '.join(map(lambda l: 
'"' + l + '"', langs)) + config_path = os.path.join(info.root, 'etc/apt/apt.conf.d/20languages') + shutil.copy(os.path.join(assets, 'apt-languages'), config_path) + sed_i(config_path, r'ACQUIRE_LANGUAGES_FILTER', config) class AptGzipIndexes(Task): - description = 'Configuring apt to always gzip lists files' - phase = phases.package_installation - successors = [apt.AptUpdate] - # Snatched from: - # https://github.com/docker/docker/blob/1d775a54cc67e27f755c7338c3ee938498e845d7/contrib/mkimage/debootstrap + description = 'Configuring apt to always gzip lists files' + phase = phases.package_installation + successors = [apt.AptUpdate] + # Snatched from: + # https://github.com/docker/docker/blob/1d775a54cc67e27f755c7338c3ee938498e845d7/contrib/mkimage/debootstrap - @classmethod - def run(cls, info): - shutil.copy(os.path.join(assets, 'apt-gzip-indexes'), - os.path.join(info.root, 'etc/apt/apt.conf.d/20gzip-indexes')) + @classmethod + def run(cls, info): + shutil.copy(os.path.join(assets, 'apt-gzip-indexes'), + os.path.join(info.root, 'etc/apt/apt.conf.d/20gzip-indexes')) class AptAutoremoveSuggests(Task): - description = 'Configuring apt to remove suggested packages when autoremoving' - phase = phases.package_installation - successors = [apt.AptUpdate] - # Snatched from: - # https://github.com/docker/docker/blob/1d775a54cc67e27f755c7338c3ee938498e845d7/contrib/mkimage/debootstrap + description = 'Configuring apt to remove suggested packages when autoremoving' + phase = phases.package_installation + successors = [apt.AptUpdate] + # Snatched from: + # https://github.com/docker/docker/blob/1d775a54cc67e27f755c7338c3ee938498e845d7/contrib/mkimage/debootstrap - @classmethod - def run(cls, info): - shutil.copy(os.path.join(assets, 'apt-autoremove-suggests'), - os.path.join(info.root, 'etc/apt/apt.conf.d/20autoremove-suggests')) + @classmethod + def run(cls, info): + shutil.copy(os.path.join(assets, 'apt-autoremove-suggests'), + os.path.join(info.root, 
'etc/apt/apt.conf.d/20autoremove-suggests')) diff --git a/bootstrapvz/plugins/minimize_size/tasks/dpkg.py b/bootstrapvz/plugins/minimize_size/tasks/dpkg.py index 244ca97..c39f0c3 100644 --- a/bootstrapvz/plugins/minimize_size/tasks/dpkg.py +++ b/bootstrapvz/plugins/minimize_size/tasks/dpkg.py @@ -9,140 +9,140 @@ from . import assets class CreateDpkgCfg(Task): - description = 'Creating /etc/dpkg/dpkg.cfg.d before bootstrapping' - phase = phases.os_installation - successors = [bootstrap.Bootstrap] + description = 'Creating /etc/dpkg/dpkg.cfg.d before bootstrapping' + phase = phases.os_installation + successors = [bootstrap.Bootstrap] - @classmethod - def run(cls, info): - os.makedirs(os.path.join(info.root, 'etc/dpkg/dpkg.cfg.d')) + @classmethod + def run(cls, info): + os.makedirs(os.path.join(info.root, 'etc/dpkg/dpkg.cfg.d')) class InitializeBootstrapFilterList(Task): - description = 'Initializing the bootstrapping filter list' - phase = phases.preparation + description = 'Initializing the bootstrapping filter list' + phase = phases.preparation - @classmethod - def run(cls, info): - info._minimize_size['bootstrap_filter'] = {'exclude': [], 'include': []} + @classmethod + def run(cls, info): + info._minimize_size['bootstrap_filter'] = {'exclude': [], 'include': []} class CreateBootstrapFilterScripts(Task): - description = 'Creating the bootstrapping locales filter script' - phase = phases.os_installation - successors = [bootstrap.Bootstrap] - # Inspired by: - # https://github.com/docker/docker/blob/1d775a54cc67e27f755c7338c3ee938498e845d7/contrib/mkimage/debootstrap + description = 'Creating the bootstrapping locales filter script' + phase = phases.os_installation + successors = [bootstrap.Bootstrap] + # Inspired by: + # https://github.com/docker/docker/blob/1d775a54cc67e27f755c7338c3ee938498e845d7/contrib/mkimage/debootstrap - @classmethod - def run(cls, info): - if info.bootstrap_script is not None: - from bootstrapvz.common.exceptions import TaskError - raise 
TaskError('info.bootstrap_script seems to already be set ' - 'and is conflicting with this task') + @classmethod + def run(cls, info): + if info.bootstrap_script is not None: + from bootstrapvz.common.exceptions import TaskError + raise TaskError('info.bootstrap_script seems to already be set ' + 'and is conflicting with this task') - bootstrap_script = os.path.join(info.workspace, 'bootstrap_script.sh') - filter_script = os.path.join(info.workspace, 'bootstrap_files_filter.sh') - excludes_file = os.path.join(info.workspace, 'debootstrap-excludes') + bootstrap_script = os.path.join(info.workspace, 'bootstrap_script.sh') + filter_script = os.path.join(info.workspace, 'bootstrap_files_filter.sh') + excludes_file = os.path.join(info.workspace, 'debootstrap-excludes') - shutil.copy(os.path.join(assets, 'bootstrap-script.sh'), bootstrap_script) - shutil.copy(os.path.join(assets, 'bootstrap-files-filter.sh'), filter_script) + shutil.copy(os.path.join(assets, 'bootstrap-script.sh'), bootstrap_script) + shutil.copy(os.path.join(assets, 'bootstrap-files-filter.sh'), filter_script) - sed_i(bootstrap_script, r'DEBOOTSTRAP_EXCLUDES_PATH', excludes_file) - sed_i(bootstrap_script, r'BOOTSTRAP_FILES_FILTER_PATH', filter_script) + sed_i(bootstrap_script, r'DEBOOTSTRAP_EXCLUDES_PATH', excludes_file) + sed_i(bootstrap_script, r'BOOTSTRAP_FILES_FILTER_PATH', filter_script) - # We exclude with patterns but include with fixed strings - # The pattern matching when excluding is needed in order to filter - # everything below e.g. /usr/share/locale but not the folder itself - filter_lists = info._minimize_size['bootstrap_filter'] - exclude_list = '\|'.join(map(lambda p: '.' + p + '.\+', filter_lists['exclude'])) - include_list = '\n'.join(map(lambda p: '.' 
+ p, filter_lists['include'])) - sed_i(filter_script, r'EXCLUDE_PATTERN', exclude_list) - sed_i(filter_script, r'INCLUDE_PATHS', include_list) - os.chmod(filter_script, 0755) + # We exclude with patterns but include with fixed strings + # The pattern matching when excluding is needed in order to filter + # everything below e.g. /usr/share/locale but not the folder itself + filter_lists = info._minimize_size['bootstrap_filter'] + exclude_list = '\|'.join(map(lambda p: '.' + p + '.\+', filter_lists['exclude'])) + include_list = '\n'.join(map(lambda p: '.' + p, filter_lists['include'])) + sed_i(filter_script, r'EXCLUDE_PATTERN', exclude_list) + sed_i(filter_script, r'INCLUDE_PATHS', include_list) + os.chmod(filter_script, 0755) - info.bootstrap_script = bootstrap_script - info._minimize_size['filter_script'] = filter_script + info.bootstrap_script = bootstrap_script + info._minimize_size['filter_script'] = filter_script class FilterLocales(Task): - description = 'Configuring dpkg and debootstrap to only include specific locales/manpages when installing packages' - phase = phases.os_installation - predecessors = [CreateDpkgCfg] - successors = [CreateBootstrapFilterScripts] - # Snatched from: - # https://github.com/docker/docker/blob/1d775a54cc67e27f755c7338c3ee938498e845d7/contrib/mkimage/debootstrap - # and - # https://raphaelhertzog.com/2010/11/15/save-disk-space-by-excluding-useless-files-with-dpkg/ + description = 'Configuring dpkg and debootstrap to only include specific locales/manpages when installing packages' + phase = phases.os_installation + predecessors = [CreateDpkgCfg] + successors = [CreateBootstrapFilterScripts] + # Snatched from: + # https://github.com/docker/docker/blob/1d775a54cc67e27f755c7338c3ee938498e845d7/contrib/mkimage/debootstrap + # and + # https://raphaelhertzog.com/2010/11/15/save-disk-space-by-excluding-useless-files-with-dpkg/ - @classmethod - def run(cls, info): - # Filter when debootstrapping - 
info._minimize_size['bootstrap_filter']['exclude'].extend([ - '/usr/share/locale/', - '/usr/share/man/', - ]) + @classmethod + def run(cls, info): + # Filter when debootstrapping + info._minimize_size['bootstrap_filter']['exclude'].extend([ + '/usr/share/locale/', + '/usr/share/man/', + ]) - locales = info.manifest.plugins['minimize_size']['dpkg']['locales'] - info._minimize_size['bootstrap_filter']['include'].extend([ - '/usr/share/locale/locale.alias', - '/usr/share/man/man1', - '/usr/share/man/man2', - '/usr/share/man/man3', - '/usr/share/man/man4', - '/usr/share/man/man5', - '/usr/share/man/man6', - '/usr/share/man/man7', - '/usr/share/man/man8', - '/usr/share/man/man9', - ] + - map(lambda l: '/usr/share/locale/' + l + '/', locales) + - map(lambda l: '/usr/share/man/' + l + '/', locales) - ) + locales = info.manifest.plugins['minimize_size']['dpkg']['locales'] + info._minimize_size['bootstrap_filter']['include'].extend([ + '/usr/share/locale/locale.alias', + '/usr/share/man/man1', + '/usr/share/man/man2', + '/usr/share/man/man3', + '/usr/share/man/man4', + '/usr/share/man/man5', + '/usr/share/man/man6', + '/usr/share/man/man7', + '/usr/share/man/man8', + '/usr/share/man/man9', + ] + + map(lambda l: '/usr/share/locale/' + l + '/', locales) + + map(lambda l: '/usr/share/man/' + l + '/', locales) + ) - # Filter when installing things with dpkg - locale_lines = ['path-exclude=/usr/share/locale/*', - 'path-include=/usr/share/locale/locale.alias'] - manpages_lines = ['path-exclude=/usr/share/man/*', - 'path-include=/usr/share/man/man[1-9]'] + # Filter when installing things with dpkg + locale_lines = ['path-exclude=/usr/share/locale/*', + 'path-include=/usr/share/locale/locale.alias'] + manpages_lines = ['path-exclude=/usr/share/man/*', + 'path-include=/usr/share/man/man[1-9]'] - locales = info.manifest.plugins['minimize_size']['dpkg']['locales'] - locale_lines.extend(map(lambda l: 'path-include=/usr/share/locale/' + l + '/*', locales)) - 
manpages_lines.extend(map(lambda l: 'path-include=/usr/share/man/' + l + '/*', locales)) + locales = info.manifest.plugins['minimize_size']['dpkg']['locales'] + locale_lines.extend(map(lambda l: 'path-include=/usr/share/locale/' + l + '/*', locales)) + manpages_lines.extend(map(lambda l: 'path-include=/usr/share/man/' + l + '/*', locales)) - locales_path = os.path.join(info.root, 'etc/dpkg/dpkg.cfg.d/10filter-locales') - manpages_path = os.path.join(info.root, 'etc/dpkg/dpkg.cfg.d/10filter-manpages') + locales_path = os.path.join(info.root, 'etc/dpkg/dpkg.cfg.d/10filter-locales') + manpages_path = os.path.join(info.root, 'etc/dpkg/dpkg.cfg.d/10filter-manpages') - with open(locales_path, 'w') as locale_filter: - locale_filter.write('\n'.join(locale_lines) + '\n') - with open(manpages_path, 'w') as manpages_filter: - manpages_filter.write('\n'.join(manpages_lines) + '\n') + with open(locales_path, 'w') as locale_filter: + locale_filter.write('\n'.join(locale_lines) + '\n') + with open(manpages_path, 'w') as manpages_filter: + manpages_filter.write('\n'.join(manpages_lines) + '\n') class ExcludeDocs(Task): - description = 'Configuring dpkg and debootstrap to not install additional documentation for packages' - phase = phases.os_installation - predecessors = [CreateDpkgCfg] - successors = [CreateBootstrapFilterScripts] + description = 'Configuring dpkg and debootstrap to not install additional documentation for packages' + phase = phases.os_installation + predecessors = [CreateDpkgCfg] + successors = [CreateBootstrapFilterScripts] - @classmethod - def run(cls, info): - # "Packages must not require the existence of any files in /usr/share/doc/ in order to function [...]." - # Source: https://www.debian.org/doc/debian-policy/ch-docs.html - # So doing this should cause no problems. 
- info._minimize_size['bootstrap_filter']['exclude'].append('/usr/share/doc/') - exclude_docs_path = os.path.join(info.root, 'etc/dpkg/dpkg.cfg.d/10exclude-docs') - with open(exclude_docs_path, 'w') as exclude_docs: - exclude_docs.write('path-exclude=/usr/share/doc/*\n') + @classmethod + def run(cls, info): + # "Packages must not require the existence of any files in /usr/share/doc/ in order to function [...]." + # Source: https://www.debian.org/doc/debian-policy/ch-docs.html + # So doing this should cause no problems. + info._minimize_size['bootstrap_filter']['exclude'].append('/usr/share/doc/') + exclude_docs_path = os.path.join(info.root, 'etc/dpkg/dpkg.cfg.d/10exclude-docs') + with open(exclude_docs_path, 'w') as exclude_docs: + exclude_docs.write('path-exclude=/usr/share/doc/*\n') class DeleteBootstrapFilterScripts(Task): - description = 'Deleting the bootstrapping locales filter script' - phase = phases.cleaning - successors = [workspace.DeleteWorkspace] + description = 'Deleting the bootstrapping locales filter script' + phase = phases.cleaning + successors = [workspace.DeleteWorkspace] - @classmethod - def run(cls, info): - os.remove(info._minimize_size['filter_script']) - del info._minimize_size['filter_script'] - os.remove(info.bootstrap_script) + @classmethod + def run(cls, info): + os.remove(info._minimize_size['filter_script']) + del info._minimize_size['filter_script'] + os.remove(info.bootstrap_script) diff --git a/bootstrapvz/plugins/minimize_size/tasks/mounts.py b/bootstrapvz/plugins/minimize_size/tasks/mounts.py index 9441511..d7c967c 100644 --- a/bootstrapvz/plugins/minimize_size/tasks/mounts.py +++ b/bootstrapvz/plugins/minimize_size/tasks/mounts.py @@ -8,36 +8,36 @@ folders = ['tmp', 'var/lib/apt/lists'] class AddFolderMounts(Task): - description = 'Mounting folders for writing temporary and cache data' - phase = phases.os_installation - predecessors = [bootstrap.Bootstrap] + description = 'Mounting folders for writing temporary and cache data' 
+ phase = phases.os_installation + predecessors = [bootstrap.Bootstrap] - @classmethod - def run(cls, info): - info._minimize_size['foldermounts'] = os.path.join(info.workspace, 'minimize_size') - os.mkdir(info._minimize_size['foldermounts']) - for folder in folders: - temp_path = os.path.join(info._minimize_size['foldermounts'], folder.replace('/', '_')) - os.mkdir(temp_path) + @classmethod + def run(cls, info): + info._minimize_size['foldermounts'] = os.path.join(info.workspace, 'minimize_size') + os.mkdir(info._minimize_size['foldermounts']) + for folder in folders: + temp_path = os.path.join(info._minimize_size['foldermounts'], folder.replace('/', '_')) + os.mkdir(temp_path) - full_path = os.path.join(info.root, folder) - info.volume.partition_map.root.add_mount(temp_path, full_path, ['--bind']) + full_path = os.path.join(info.root, folder) + info.volume.partition_map.root.add_mount(temp_path, full_path, ['--bind']) class RemoveFolderMounts(Task): - description = 'Removing folder mounts for temporary and cache data' - phase = phases.system_cleaning - successors = [apt.AptClean] + description = 'Removing folder mounts for temporary and cache data' + phase = phases.system_cleaning + successors = [apt.AptClean] - @classmethod - def run(cls, info): - import shutil - for folder in folders: - temp_path = os.path.join(info._minimize_size['foldermounts'], folder.replace('/', '_')) - full_path = os.path.join(info.root, folder) + @classmethod + def run(cls, info): + import shutil + for folder in folders: + temp_path = os.path.join(info._minimize_size['foldermounts'], folder.replace('/', '_')) + full_path = os.path.join(info.root, folder) - info.volume.partition_map.root.remove_mount(full_path) - shutil.rmtree(temp_path) + info.volume.partition_map.root.remove_mount(full_path) + shutil.rmtree(temp_path) - os.rmdir(info._minimize_size['foldermounts']) - del info._minimize_size['foldermounts'] + os.rmdir(info._minimize_size['foldermounts']) + del 
info._minimize_size['foldermounts'] diff --git a/bootstrapvz/plugins/minimize_size/tasks/shrink.py b/bootstrapvz/plugins/minimize_size/tasks/shrink.py index 09e2576..5b532ea 100644 --- a/bootstrapvz/plugins/minimize_size/tasks/shrink.py +++ b/bootstrapvz/plugins/minimize_size/tasks/shrink.py @@ -9,37 +9,37 @@ import os class AddRequiredCommands(Task): - description = 'Adding commands required for reducing volume size' - phase = phases.preparation - successors = [host.CheckExternalCommands] + description = 'Adding commands required for reducing volume size' + phase = phases.preparation + successors = [host.CheckExternalCommands] - @classmethod - def run(cls, info): - if info.manifest.plugins['minimize_size'].get('zerofree', False): - info.host_dependencies['zerofree'] = 'zerofree' - if info.manifest.plugins['minimize_size'].get('shrink', False): - link = 'https://my.vmware.com/web/vmware/info/slug/desktop_end_user_computing/vmware_workstation/10_0' - info.host_dependencies['vmware-vdiskmanager'] = link + @classmethod + def run(cls, info): + if info.manifest.plugins['minimize_size'].get('zerofree', False): + info.host_dependencies['zerofree'] = 'zerofree' + if info.manifest.plugins['minimize_size'].get('shrink', False): + link = 'https://my.vmware.com/web/vmware/info/slug/desktop_end_user_computing/vmware_workstation/10_0' + info.host_dependencies['vmware-vdiskmanager'] = link class Zerofree(Task): - description = 'Zeroing unused blocks on the root partition' - phase = phases.volume_unmounting - predecessors = [filesystem.UnmountRoot] - successors = [partitioning.UnmapPartitions, volume.Detach] + description = 'Zeroing unused blocks on the root partition' + phase = phases.volume_unmounting + predecessors = [filesystem.UnmountRoot] + successors = [partitioning.UnmapPartitions, volume.Detach] - @classmethod - def run(cls, info): - log_check_call(['zerofree', info.volume.partition_map.root.device_path]) + @classmethod + def run(cls, info): + log_check_call(['zerofree', 
info.volume.partition_map.root.device_path]) class ShrinkVolume(Task): - description = 'Shrinking the volume' - phase = phases.volume_unmounting - predecessors = [volume.Detach] + description = 'Shrinking the volume' + phase = phases.volume_unmounting + predecessors = [volume.Detach] - @classmethod - def run(cls, info): - perm = os.stat(info.volume.image_path).st_mode & 0777 - log_check_call(['/usr/bin/vmware-vdiskmanager', '-k', info.volume.image_path]) - os.chmod(info.volume.image_path, perm) + @classmethod + def run(cls, info): + perm = os.stat(info.volume.image_path).st_mode & 0777 + log_check_call(['/usr/bin/vmware-vdiskmanager', '-k', info.volume.image_path]) + os.chmod(info.volume.image_path, perm) diff --git a/bootstrapvz/plugins/ntp/__init__.py b/bootstrapvz/plugins/ntp/__init__.py index 858af72..5e5715b 100644 --- a/bootstrapvz/plugins/ntp/__init__.py +++ b/bootstrapvz/plugins/ntp/__init__.py @@ -1,11 +1,11 @@ def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - import tasks - taskset.add(tasks.AddNtpPackage) - if manifest.plugins['ntp'].get('servers', False): - taskset.add(tasks.SetNtpServers) + import tasks + taskset.add(tasks.AddNtpPackage) + if manifest.plugins['ntp'].get('servers', False): + taskset.add(tasks.SetNtpServers) diff --git a/bootstrapvz/plugins/ntp/tasks.py b/bootstrapvz/plugins/ntp/tasks.py index a13c77b..126ba9f 100644 --- a/bootstrapvz/plugins/ntp/tasks.py +++ b/bootstrapvz/plugins/ntp/tasks.py @@ -3,30 +3,30 @@ from bootstrapvz.common import phases class AddNtpPackage(Task): - description = 'Adding NTP Package' - phase = phases.preparation + description = 'Adding NTP Package' + phase = phases.preparation - 
@classmethod - def run(cls, info): - info.packages.add('ntp') + @classmethod + def run(cls, info): + info.packages.add('ntp') class SetNtpServers(Task): - description = 'Setting NTP servers' - phase = phases.system_modification + description = 'Setting NTP servers' + phase = phases.system_modification - @classmethod - def run(cls, info): - import fileinput - import os - import re - ntp_path = os.path.join(info.root, 'etc/ntp.conf') - servers = list(info.manifest.plugins['ntp']['servers']) - debian_ntp_server = re.compile('.*[0-9]\.debian\.pool\.ntp\.org.*') - for line in fileinput.input(files=ntp_path, inplace=True): - # Will write all the specified servers on the first match, then supress all other default servers - if re.match(debian_ntp_server, line): - while servers: - print 'server {server_address} iburst'.format(server_address=servers.pop(0)) - else: - print line, + @classmethod + def run(cls, info): + import fileinput + import os + import re + ntp_path = os.path.join(info.root, 'etc/ntp.conf') + servers = list(info.manifest.plugins['ntp']['servers']) + debian_ntp_server = re.compile('.*[0-9]\.debian\.pool\.ntp\.org.*') + for line in fileinput.input(files=ntp_path, inplace=True): + # Will write all the specified servers on the first match, then supress all other default servers + if re.match(debian_ntp_server, line): + while servers: + print 'server {server_address} iburst'.format(server_address=servers.pop(0)) + else: + print line, diff --git a/bootstrapvz/plugins/opennebula/__init__.py b/bootstrapvz/plugins/opennebula/__init__.py index 8681d18..5d0fcab 100644 --- a/bootstrapvz/plugins/opennebula/__init__.py +++ b/bootstrapvz/plugins/opennebula/__init__.py @@ -1,9 +1,9 @@ def resolve_tasks(taskset, manifest): - import tasks - from bootstrapvz.common.tasks import apt - from bootstrapvz.common.releases import wheezy - if manifest.release == wheezy: - taskset.add(apt.AddBackports) - taskset.update([tasks.AddONEContextPackage]) + import tasks + from 
bootstrapvz.common.tasks import apt + from bootstrapvz.common.releases import wheezy + if manifest.release == wheezy: + taskset.add(apt.AddBackports) + taskset.update([tasks.AddONEContextPackage]) diff --git a/bootstrapvz/plugins/opennebula/tasks.py b/bootstrapvz/plugins/opennebula/tasks.py index 1dcac7e..64ca9f2 100644 --- a/bootstrapvz/plugins/opennebula/tasks.py +++ b/bootstrapvz/plugins/opennebula/tasks.py @@ -4,14 +4,14 @@ from bootstrapvz.common import phases class AddONEContextPackage(Task): - description = 'Adding the OpenNebula context package' - phase = phases.preparation - predecessors = [apt.AddBackports] + description = 'Adding the OpenNebula context package' + phase = phases.preparation + predecessors = [apt.AddBackports] - @classmethod - def run(cls, info): - target = None - from bootstrapvz.common.releases import wheezy - if info.manifest.release == wheezy: - target = '{system.release}-backports' - info.packages.add('opennebula-context', target) + @classmethod + def run(cls, info): + target = None + from bootstrapvz.common.releases import wheezy + if info.manifest.release == wheezy: + target = '{system.release}-backports' + info.packages.add('opennebula-context', target) diff --git a/bootstrapvz/plugins/pip_install/__init__.py b/bootstrapvz/plugins/pip_install/__init__.py index 0f6810b..d88acec 100644 --- a/bootstrapvz/plugins/pip_install/__init__.py +++ b/bootstrapvz/plugins/pip_install/__init__.py @@ -2,11 +2,11 @@ import tasks def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - taskset.add(tasks.AddPipPackage) - taskset.add(tasks.PipInstallCommand) + taskset.add(tasks.AddPipPackage) + taskset.add(tasks.PipInstallCommand) diff 
--git a/bootstrapvz/plugins/pip_install/tasks.py b/bootstrapvz/plugins/pip_install/tasks.py index c6ff89b..d43abeb 100644 --- a/bootstrapvz/plugins/pip_install/tasks.py +++ b/bootstrapvz/plugins/pip_install/tasks.py @@ -3,23 +3,23 @@ from bootstrapvz.common import phases class AddPipPackage(Task): - description = 'Adding `pip\' and Co. to the image packages' - phase = phases.preparation + description = 'Adding `pip\' and Co. to the image packages' + phase = phases.preparation - @classmethod - def run(cls, info): - for package_name in ('python-pip', 'build-essential', 'python-dev'): - info.packages.add(package_name) + @classmethod + def run(cls, info): + for package_name in ('python-pip', 'build-essential', 'python-dev'): + info.packages.add(package_name) class PipInstallCommand(Task): - description = 'Install python packages from pypi with pip' - phase = phases.system_modification + description = 'Install python packages from pypi with pip' + phase = phases.system_modification - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import log_check_call - packages = info.manifest.plugins['pip_install']['packages'] - pip_install_command = ['chroot', info.root, 'pip', 'install'] - pip_install_command.extend(packages) - log_check_call(pip_install_command) + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import log_check_call + packages = info.manifest.plugins['pip_install']['packages'] + pip_install_command = ['chroot', info.root, 'pip', 'install'] + pip_install_command.extend(packages) + log_check_call(pip_install_command) diff --git a/bootstrapvz/plugins/prebootstrapped/__init__.py b/bootstrapvz/plugins/prebootstrapped/__init__.py index 614e049..2dc9796 100644 --- a/bootstrapvz/plugins/prebootstrapped/__init__.py +++ b/bootstrapvz/plugins/prebootstrapped/__init__.py @@ -14,44 +14,44 @@ from bootstrapvz.common.tasks import partitioning def validate_manifest(data, validator, error): - import os.path - schema_path = 
os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - settings = manifest.plugins['prebootstrapped'] - skip_tasks = [ebs.Create, - loopback.Create, + settings = manifest.plugins['prebootstrapped'] + skip_tasks = [ebs.Create, + loopback.Create, - filesystem.Format, - partitioning.PartitionVolume, - filesystem.TuneVolumeFS, - filesystem.AddXFSProgs, - filesystem.CreateBootMountDir, + filesystem.Format, + partitioning.PartitionVolume, + filesystem.TuneVolumeFS, + filesystem.AddXFSProgs, + filesystem.CreateBootMountDir, - apt.DisableDaemonAutostart, - locale.GenerateLocale, - bootstrap.MakeTarball, - bootstrap.Bootstrap, - guest_additions.InstallGuestAdditions, - ] - if manifest.volume['backing'] == 'ebs': - if settings.get('snapshot', None) is not None: - taskset.add(CreateFromSnapshot) - [taskset.discard(task) for task in skip_tasks] - else: - taskset.add(Snapshot) - else: - if settings.get('image', None) is not None: - taskset.add(CreateFromImage) - [taskset.discard(task) for task in skip_tasks] - else: - taskset.add(CopyImage) + apt.DisableDaemonAutostart, + locale.GenerateLocale, + bootstrap.MakeTarball, + bootstrap.Bootstrap, + guest_additions.InstallGuestAdditions, + ] + if manifest.volume['backing'] == 'ebs': + if settings.get('snapshot', None) is not None: + taskset.add(CreateFromSnapshot) + [taskset.discard(task) for task in skip_tasks] + else: + taskset.add(Snapshot) + else: + if settings.get('image', None) is not None: + taskset.add(CreateFromImage) + [taskset.discard(task) for task in skip_tasks] + else: + taskset.add(CopyImage) def resolve_rollback_tasks(taskset, manifest, completed, counter_task): - if manifest.volume['backing'] == 'ebs': - counter_task(taskset, CreateFromSnapshot, volume.Delete) - else: - 
counter_task(taskset, CreateFromImage, volume.Delete) + if manifest.volume['backing'] == 'ebs': + counter_task(taskset, CreateFromSnapshot, volume.Delete) + else: + counter_task(taskset, CreateFromImage, volume.Delete) diff --git a/bootstrapvz/plugins/prebootstrapped/tasks.py b/bootstrapvz/plugins/prebootstrapped/tasks.py index df09782..53c7d6d 100644 --- a/bootstrapvz/plugins/prebootstrapped/tasks.py +++ b/bootstrapvz/plugins/prebootstrapped/tasks.py @@ -13,83 +13,83 @@ log = logging.getLogger(__name__) class Snapshot(Task): - description = 'Creating a snapshot of the bootstrapped volume' - phase = phases.package_installation - predecessors = [packages.InstallPackages, guest_additions.InstallGuestAdditions] + description = 'Creating a snapshot of the bootstrapped volume' + phase = phases.package_installation + predecessors = [packages.InstallPackages, guest_additions.InstallGuestAdditions] - @classmethod - def run(cls, info): - snapshot = None - with unmounted(info.volume): - snapshot = info.volume.snapshot() - msg = 'A snapshot of the bootstrapped volume was created. ID: ' + snapshot.id - log.info(msg) + @classmethod + def run(cls, info): + snapshot = None + with unmounted(info.volume): + snapshot = info.volume.snapshot() + msg = 'A snapshot of the bootstrapped volume was created. 
ID: ' + snapshot.id + log.info(msg) class CreateFromSnapshot(Task): - description = 'Creating EBS volume from a snapshot' - phase = phases.volume_creation - successors = [ebs.Attach] + description = 'Creating EBS volume from a snapshot' + phase = phases.volume_creation + successors = [ebs.Attach] - @classmethod - def run(cls, info): - snapshot = info.manifest.plugins['prebootstrapped']['snapshot'] - ebs_volume = info._ec2['connection'].create_volume(info.volume.size.bytes.get_qty_in('GiB'), - info._ec2['host']['availabilityZone'], - snapshot=snapshot) - while ebs_volume.volume_state() != 'available': - time.sleep(5) - ebs_volume.update() + @classmethod + def run(cls, info): + snapshot = info.manifest.plugins['prebootstrapped']['snapshot'] + ebs_volume = info._ec2['connection'].create_volume(info.volume.size.bytes.get_qty_in('GiB'), + info._ec2['host']['availabilityZone'], + snapshot=snapshot) + while ebs_volume.volume_state() != 'available': + time.sleep(5) + ebs_volume.update() - info.volume.volume = ebs_volume - set_fs_states(info.volume) + info.volume.volume = ebs_volume + set_fs_states(info.volume) class CopyImage(Task): - description = 'Creating a snapshot of the bootstrapped volume' - phase = phases.package_installation - predecessors = [packages.InstallPackages, guest_additions.InstallGuestAdditions] + description = 'Creating a snapshot of the bootstrapped volume' + phase = phases.package_installation + predecessors = [packages.InstallPackages, guest_additions.InstallGuestAdditions] - @classmethod - def run(cls, info): - loopback_backup_name = 'volume-{id}.{ext}.backup'.format(id=info.run_id, ext=info.volume.extension) - destination = os.path.join(info.manifest.bootstrapper['workspace'], loopback_backup_name) + @classmethod + def run(cls, info): + loopback_backup_name = 'volume-{id}.{ext}.backup'.format(id=info.run_id, ext=info.volume.extension) + destination = os.path.join(info.manifest.bootstrapper['workspace'], loopback_backup_name) - with 
unmounted(info.volume): - copyfile(info.volume.image_path, destination) - msg = 'A copy of the bootstrapped volume was created. Path: ' + destination - log.info(msg) + with unmounted(info.volume): + copyfile(info.volume.image_path, destination) + msg = 'A copy of the bootstrapped volume was created. Path: ' + destination + log.info(msg) class CreateFromImage(Task): - description = 'Creating loopback image from a copy' - phase = phases.volume_creation - successors = [volume.Attach] + description = 'Creating loopback image from a copy' + phase = phases.volume_creation + successors = [volume.Attach] - @classmethod - def run(cls, info): - info.volume.image_path = os.path.join(info.workspace, 'volume.' + info.volume.extension) - loopback_backup_path = info.manifest.plugins['prebootstrapped']['image'] - copyfile(loopback_backup_path, info.volume.image_path) + @classmethod + def run(cls, info): + info.volume.image_path = os.path.join(info.workspace, 'volume.' + info.volume.extension) + loopback_backup_path = info.manifest.plugins['prebootstrapped']['image'] + copyfile(loopback_backup_path, info.volume.image_path) - set_fs_states(info.volume) + set_fs_states(info.volume) def set_fs_states(volume): - volume.fsm.current = 'detached' + volume.fsm.current = 'detached' - p_map = volume.partition_map - from bootstrapvz.base.fs.partitionmaps.none import NoPartitions - if not isinstance(p_map, NoPartitions): - p_map.fsm.current = 'unmapped' + p_map = volume.partition_map + from bootstrapvz.base.fs.partitionmaps.none import NoPartitions + if not isinstance(p_map, NoPartitions): + p_map.fsm.current = 'unmapped' - from bootstrapvz.base.fs.partitions.unformatted import UnformattedPartition - from bootstrapvz.base.fs.partitions.single import SinglePartition - for partition in p_map.partitions: - if isinstance(partition, UnformattedPartition): - partition.fsm.current = 'unmapped' - continue - if isinstance(partition, SinglePartition): - partition.fsm.current = 'formatted' - continue - 
partition.fsm.current = 'unmapped_fmt' + from bootstrapvz.base.fs.partitions.unformatted import UnformattedPartition + from bootstrapvz.base.fs.partitions.single import SinglePartition + for partition in p_map.partitions: + if isinstance(partition, UnformattedPartition): + partition.fsm.current = 'unmapped' + continue + if isinstance(partition, SinglePartition): + partition.fsm.current = 'formatted' + continue + partition.fsm.current = 'unmapped_fmt' diff --git a/bootstrapvz/plugins/puppet/__init__.py b/bootstrapvz/plugins/puppet/__init__.py index 529c1d5..81d5b39 100644 --- a/bootstrapvz/plugins/puppet/__init__.py +++ b/bootstrapvz/plugins/puppet/__init__.py @@ -2,18 +2,18 @@ import tasks def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - taskset.add(tasks.AddPackages) - if 'assets' in manifest.plugins['puppet']: - taskset.add(tasks.CheckAssetsPath) - taskset.add(tasks.CopyPuppetAssets) - if 'manifest' in manifest.plugins['puppet']: - taskset.add(tasks.CheckManifestPath) - taskset.add(tasks.ApplyPuppetManifest) - if manifest.plugins['puppet'].get('enable_agent', False): - taskset.add(tasks.EnableAgent) + taskset.add(tasks.AddPackages) + if 'assets' in manifest.plugins['puppet']: + taskset.add(tasks.CheckAssetsPath) + taskset.add(tasks.CopyPuppetAssets) + if 'manifest' in manifest.plugins['puppet']: + taskset.add(tasks.CheckManifestPath) + taskset.add(tasks.ApplyPuppetManifest) + if manifest.plugins['puppet'].get('enable_agent', False): + taskset.add(tasks.EnableAgent) diff --git a/bootstrapvz/plugins/puppet/tasks.py b/bootstrapvz/plugins/puppet/tasks.py index 5c5ac22..5ea3d4d 100644 --- a/bootstrapvz/plugins/puppet/tasks.py +++ 
b/bootstrapvz/plugins/puppet/tasks.py @@ -5,88 +5,88 @@ import os class CheckAssetsPath(Task): - description = 'Checking whether the assets path exist' - phase = phases.preparation + description = 'Checking whether the assets path exist' + phase = phases.preparation - @classmethod - def run(cls, info): - from bootstrapvz.common.exceptions import TaskError - assets = info.manifest.plugins['puppet']['assets'] - if not os.path.exists(assets): - msg = 'The assets directory {assets} does not exist.'.format(assets=assets) - raise TaskError(msg) - if not os.path.isdir(assets): - msg = 'The assets path {assets} does not point to a directory.'.format(assets=assets) - raise TaskError(msg) + @classmethod + def run(cls, info): + from bootstrapvz.common.exceptions import TaskError + assets = info.manifest.plugins['puppet']['assets'] + if not os.path.exists(assets): + msg = 'The assets directory {assets} does not exist.'.format(assets=assets) + raise TaskError(msg) + if not os.path.isdir(assets): + msg = 'The assets path {assets} does not point to a directory.'.format(assets=assets) + raise TaskError(msg) class CheckManifestPath(Task): - description = 'Checking whether the manifest path exist' - phase = phases.preparation + description = 'Checking whether the manifest path exist' + phase = phases.preparation - @classmethod - def run(cls, info): - from bootstrapvz.common.exceptions import TaskError - manifest = info.manifest.plugins['puppet']['manifest'] - if not os.path.exists(manifest): - msg = 'The manifest file {manifest} does not exist.'.format(manifest=manifest) - raise TaskError(msg) - if not os.path.isfile(manifest): - msg = 'The manifest path {manifest} does not point to a file.'.format(manifest=manifest) - raise TaskError(msg) + @classmethod + def run(cls, info): + from bootstrapvz.common.exceptions import TaskError + manifest = info.manifest.plugins['puppet']['manifest'] + if not os.path.exists(manifest): + msg = 'The manifest file {manifest} does not 
exist.'.format(manifest=manifest) + raise TaskError(msg) + if not os.path.isfile(manifest): + msg = 'The manifest path {manifest} does not point to a file.'.format(manifest=manifest) + raise TaskError(msg) class AddPackages(Task): - description = 'Add puppet package' - phase = phases.preparation + description = 'Add puppet package' + phase = phases.preparation - @classmethod - def run(cls, info): - info.packages.add('puppet') + @classmethod + def run(cls, info): + info.packages.add('puppet') class CopyPuppetAssets(Task): - description = 'Copying puppet assets' - phase = phases.system_modification + description = 'Copying puppet assets' + phase = phases.system_modification - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import copy_tree - copy_tree(info.manifest.plugins['puppet']['assets'], os.path.join(info.root, 'etc/puppet')) + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import copy_tree + copy_tree(info.manifest.plugins['puppet']['assets'], os.path.join(info.root, 'etc/puppet')) class ApplyPuppetManifest(Task): - description = 'Applying puppet manifest' - phase = phases.user_modification + description = 'Applying puppet manifest' + phase = phases.user_modification - @classmethod - def run(cls, info): - with open(os.path.join(info.root, 'etc/hostname')) as handle: - hostname = handle.read().strip() - with open(os.path.join(info.root, 'etc/hosts'), 'a') as handle: - handle.write('127.0.0.1\t{hostname}\n'.format(hostname=hostname)) + @classmethod + def run(cls, info): + with open(os.path.join(info.root, 'etc/hostname')) as handle: + hostname = handle.read().strip() + with open(os.path.join(info.root, 'etc/hosts'), 'a') as handle: + handle.write('127.0.0.1\t{hostname}\n'.format(hostname=hostname)) - from shutil import copy - pp_manifest = info.manifest.plugins['puppet']['manifest'] - manifest_rel_dst = os.path.join('tmp', os.path.basename(pp_manifest)) - manifest_dst = os.path.join(info.root, manifest_rel_dst) - 
copy(pp_manifest, manifest_dst) + from shutil import copy + pp_manifest = info.manifest.plugins['puppet']['manifest'] + manifest_rel_dst = os.path.join('tmp', os.path.basename(pp_manifest)) + manifest_dst = os.path.join(info.root, manifest_rel_dst) + copy(pp_manifest, manifest_dst) - manifest_path = os.path.join('/', manifest_rel_dst) - from bootstrapvz.common.tools import log_check_call - log_check_call(['chroot', info.root, - 'puppet', 'apply', manifest_path]) - os.remove(manifest_dst) + manifest_path = os.path.join('/', manifest_rel_dst) + from bootstrapvz.common.tools import log_check_call + log_check_call(['chroot', info.root, + 'puppet', 'apply', manifest_path]) + os.remove(manifest_dst) - hosts_path = os.path.join(info.root, 'etc/hosts') - sed_i(hosts_path, '127.0.0.1\s*{hostname}\n?'.format(hostname=hostname), '') + hosts_path = os.path.join(info.root, 'etc/hosts') + sed_i(hosts_path, '127.0.0.1\s*{hostname}\n?'.format(hostname=hostname), '') class EnableAgent(Task): - description = 'Enabling the puppet agent' - phase = phases.system_modification + description = 'Enabling the puppet agent' + phase = phases.system_modification - @classmethod - def run(cls, info): - puppet_defaults = os.path.join(info.root, 'etc/defaults/puppet') - sed_i(puppet_defaults, 'START=no', 'START=yes') + @classmethod + def run(cls, info): + puppet_defaults = os.path.join(info.root, 'etc/defaults/puppet') + sed_i(puppet_defaults, 'START=no', 'START=yes') diff --git a/bootstrapvz/plugins/root_password/__init__.py b/bootstrapvz/plugins/root_password/__init__.py index f97e9f8..7fc41f1 100644 --- a/bootstrapvz/plugins/root_password/__init__.py +++ b/bootstrapvz/plugins/root_password/__init__.py @@ -1,14 +1,14 @@ def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = 
os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - from bootstrapvz.common.tasks import ssh - from tasks import SetRootPassword - taskset.discard(ssh.DisableSSHPasswordAuthentication) - taskset.add(ssh.EnableRootLogin) - taskset.add(SetRootPassword) + from bootstrapvz.common.tasks import ssh + from tasks import SetRootPassword + taskset.discard(ssh.DisableSSHPasswordAuthentication) + taskset.add(ssh.EnableRootLogin) + taskset.add(SetRootPassword) diff --git a/bootstrapvz/plugins/root_password/tasks.py b/bootstrapvz/plugins/root_password/tasks.py index 359a0d4..b4ee7b3 100644 --- a/bootstrapvz/plugins/root_password/tasks.py +++ b/bootstrapvz/plugins/root_password/tasks.py @@ -3,11 +3,11 @@ from bootstrapvz.common import phases class SetRootPassword(Task): - description = 'Setting the root password' - phase = phases.system_modification + description = 'Setting the root password' + phase = phases.system_modification - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import log_check_call - log_check_call(['chroot', info.root, 'chpasswd'], - 'root:' + info.manifest.plugins['root_password']['password']) + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import log_check_call + log_check_call(['chroot', info.root, 'chpasswd'], + 'root:' + info.manifest.plugins['root_password']['password']) diff --git a/bootstrapvz/plugins/salt/__init__.py b/bootstrapvz/plugins/salt/__init__.py index f165f49..b69c806 100644 --- a/bootstrapvz/plugins/salt/__init__.py +++ b/bootstrapvz/plugins/salt/__init__.py @@ -2,13 +2,13 @@ import tasks def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + 
validator(data, schema_path) def resolve_tasks(taskset, manifest): - taskset.add(tasks.InstallSaltDependencies) - taskset.add(tasks.BootstrapSaltMinion) - if 'grains' in manifest.plugins['salt']: - taskset.add(tasks.SetSaltGrains) + taskset.add(tasks.InstallSaltDependencies) + taskset.add(tasks.BootstrapSaltMinion) + if 'grains' in manifest.plugins['salt']: + taskset.add(tasks.SetSaltGrains) diff --git a/bootstrapvz/plugins/salt/tasks.py b/bootstrapvz/plugins/salt/tasks.py index 008a678..41bfa3c 100644 --- a/bootstrapvz/plugins/salt/tasks.py +++ b/bootstrapvz/plugins/salt/tasks.py @@ -8,53 +8,53 @@ import urllib class InstallSaltDependencies(Task): - description = 'Add depended packages for salt-minion' - phase = phases.preparation + description = 'Add depended packages for salt-minion' + phase = phases.preparation - @classmethod - def run(cls, info): - info.packages.add('curl') - info.packages.add('ca-certificates') + @classmethod + def run(cls, info): + info.packages.add('curl') + info.packages.add('ca-certificates') class BootstrapSaltMinion(Task): - description = 'Installing salt-minion using the bootstrap script' - phase = phases.package_installation - predecessors = [packages.InstallPackages] + description = 'Installing salt-minion using the bootstrap script' + phase = phases.package_installation + predecessors = [packages.InstallPackages] - @classmethod - def run(cls, info): - # Download bootstrap script - bootstrap_script = os.path.join(info.root, 'install_salt.sh') - with open(bootstrap_script, 'w') as f: - d = urllib.urlopen('http://bootstrap.saltstack.org') - f.write(d.read()) + @classmethod + def run(cls, info): + # Download bootstrap script + bootstrap_script = os.path.join(info.root, 'install_salt.sh') + with open(bootstrap_script, 'w') as f: + d = urllib.urlopen('http://bootstrap.saltstack.org') + f.write(d.read()) - # This is needed since bootstrap doesn't handle -X for debian distros properly. 
- # We disable checking for running services at end since we do not start them. - sed_i(bootstrap_script, 'install_debian_check_services', 'disabled_debian_check_services') + # This is needed since bootstrap doesn't handle -X for debian distros properly. + # We disable checking for running services at end since we do not start them. + sed_i(bootstrap_script, 'install_debian_check_services', 'disabled_debian_check_services') - bootstrap_command = ['chroot', info.root, 'bash', 'install_salt.sh', '-X'] + bootstrap_command = ['chroot', info.root, 'bash', 'install_salt.sh', '-X'] - if 'master' in info.manifest.plugins['salt']: - bootstrap_command.extend(['-A', info.manifest.plugins['salt']['master']]) + if 'master' in info.manifest.plugins['salt']: + bootstrap_command.extend(['-A', info.manifest.plugins['salt']['master']]) - install_source = info.manifest.plugins['salt'].get('install_source', 'stable') + install_source = info.manifest.plugins['salt'].get('install_source', 'stable') - bootstrap_command.append(install_source) - if install_source == 'git' and ('version' in info.manifest.plugins['salt']): - bootstrap_command.append(info.manifest.plugins['salt']['version']) - log_check_call(bootstrap_command) + bootstrap_command.append(install_source) + if install_source == 'git' and ('version' in info.manifest.plugins['salt']): + bootstrap_command.append(info.manifest.plugins['salt']['version']) + log_check_call(bootstrap_command) class SetSaltGrains(Task): - description = 'Set custom salt grains' - phase = phases.system_modification + description = 'Set custom salt grains' + phase = phases.system_modification - @classmethod - def run(cls, info): - grains_file = os.path.join(info.root, 'etc/salt/grains') - grains = info.manifest.plugins['salt']['grains'] - with open(grains_file, 'a') as f: - for g in grains: - f.write('%s: %s\n' % (g, grains[g])) + @classmethod + def run(cls, info): + grains_file = os.path.join(info.root, 'etc/salt/grains') + grains = 
info.manifest.plugins['salt']['grains'] + with open(grains_file, 'a') as f: + for g in grains: + f.write('%s: %s\n' % (g, grains[g])) diff --git a/bootstrapvz/plugins/unattended_upgrades/__init__.py b/bootstrapvz/plugins/unattended_upgrades/__init__.py index dbf5ebd..c8190c2 100644 --- a/bootstrapvz/plugins/unattended_upgrades/__init__.py +++ b/bootstrapvz/plugins/unattended_upgrades/__init__.py @@ -1,12 +1,12 @@ def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - import tasks - taskset.add(tasks.AddUnattendedUpgradesPackage) - taskset.add(tasks.EnablePeriodicUpgrades) + import tasks + taskset.add(tasks.AddUnattendedUpgradesPackage) + taskset.add(tasks.EnablePeriodicUpgrades) diff --git a/bootstrapvz/plugins/unattended_upgrades/tasks.py b/bootstrapvz/plugins/unattended_upgrades/tasks.py index dd24fb5..d56b415 100644 --- a/bootstrapvz/plugins/unattended_upgrades/tasks.py +++ b/bootstrapvz/plugins/unattended_upgrades/tasks.py @@ -3,37 +3,37 @@ from bootstrapvz.common import phases class AddUnattendedUpgradesPackage(Task): - description = 'Adding `unattended-upgrades\' to the image packages' - phase = phases.preparation + description = 'Adding `unattended-upgrades\' to the image packages' + phase = phases.preparation - @classmethod - def run(cls, info): - info.packages.add('unattended-upgrades') + @classmethod + def run(cls, info): + info.packages.add('unattended-upgrades') class EnablePeriodicUpgrades(Task): - description = 'Writing the periodic upgrades apt config file' - phase = phases.system_modification + description = 'Writing the periodic upgrades apt config file' + phase = phases.system_modification - @classmethod - def run(cls, info): - 
import os.path - periodic_path = os.path.join(info.root, 'etc/apt/apt.conf.d/02periodic') - update_interval = info.manifest.plugins['unattended_upgrades']['update_interval'] - download_interval = info.manifest.plugins['unattended_upgrades']['download_interval'] - upgrade_interval = info.manifest.plugins['unattended_upgrades']['upgrade_interval'] - with open(periodic_path, 'w') as periodic: - periodic.write(('// Enable the update/upgrade script (0=disable)\n' - 'APT::Periodic::Enable "1";\n\n' - '// Do "apt-get update" automatically every n-days (0=disable)\n' - 'APT::Periodic::Update-Package-Lists "{update_interval}";\n\n' - '// Do "apt-get upgrade --download-only" every n-days (0=disable)\n' - 'APT::Periodic::Download-Upgradeable-Packages "{download_interval}";\n\n' - '// Run the "unattended-upgrade" security upgrade script\n' - '// every n-days (0=disabled)\n' - '// Requires the package "unattended-upgrades" and will write\n' - '// a log in /var/log/unattended-upgrades\n' - 'APT::Periodic::Unattended-Upgrade "{upgrade_interval}";\n' - .format(update_interval=update_interval, - download_interval=download_interval, - upgrade_interval=upgrade_interval))) + @classmethod + def run(cls, info): + import os.path + periodic_path = os.path.join(info.root, 'etc/apt/apt.conf.d/02periodic') + update_interval = info.manifest.plugins['unattended_upgrades']['update_interval'] + download_interval = info.manifest.plugins['unattended_upgrades']['download_interval'] + upgrade_interval = info.manifest.plugins['unattended_upgrades']['upgrade_interval'] + with open(periodic_path, 'w') as periodic: + periodic.write(('// Enable the update/upgrade script (0=disable)\n' + 'APT::Periodic::Enable "1";\n\n' + '// Do "apt-get update" automatically every n-days (0=disable)\n' + 'APT::Periodic::Update-Package-Lists "{update_interval}";\n\n' + '// Do "apt-get upgrade --download-only" every n-days (0=disable)\n' + 'APT::Periodic::Download-Upgradeable-Packages "{download_interval}";\n\n' + '// Run 
the "unattended-upgrade" security upgrade script\n' + '// every n-days (0=disabled)\n' + '// Requires the package "unattended-upgrades" and will write\n' + '// a log in /var/log/unattended-upgrades\n' + 'APT::Periodic::Unattended-Upgrade "{upgrade_interval}";\n' + .format(update_interval=update_interval, + download_interval=download_interval, + upgrade_interval=upgrade_interval))) diff --git a/bootstrapvz/plugins/vagrant/__init__.py b/bootstrapvz/plugins/vagrant/__init__.py index bfe6168..4bcc552 100644 --- a/bootstrapvz/plugins/vagrant/__init__.py +++ b/bootstrapvz/plugins/vagrant/__init__.py @@ -7,28 +7,28 @@ import os def validate_manifest(data, validator, error): - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - taskset.update(task_groups.ssh_group) + taskset.update(task_groups.ssh_group) - taskset.discard(image.MoveImage) - taskset.discard(ssh.DisableSSHPasswordAuthentication) + taskset.discard(image.MoveImage) + taskset.discard(ssh.DisableSSHPasswordAuthentication) - taskset.update([tasks.CheckBoxPath, - tasks.CreateVagrantBoxDir, - tasks.AddPackages, - tasks.CreateVagrantUser, - tasks.PasswordlessSudo, - tasks.SetRootPassword, - tasks.AddInsecurePublicKey, - tasks.PackageBox, - tasks.RemoveVagrantBoxDir, - volume.Delete, - ]) + taskset.update([tasks.CheckBoxPath, + tasks.CreateVagrantBoxDir, + tasks.AddPackages, + tasks.CreateVagrantUser, + tasks.PasswordlessSudo, + tasks.SetRootPassword, + tasks.AddInsecurePublicKey, + tasks.PackageBox, + tasks.RemoveVagrantBoxDir, + volume.Delete, + ]) def resolve_rollback_tasks(taskset, manifest, completed, counter_task): - counter_task(taskset, tasks.CreateVagrantBoxDir, tasks.RemoveVagrantBoxDir) + counter_task(taskset, tasks.CreateVagrantBoxDir, tasks.RemoveVagrantBoxDir) diff 
--git a/bootstrapvz/plugins/vagrant/tasks.py b/bootstrapvz/plugins/vagrant/tasks.py index 9bdda2a..9cd741f 100644 --- a/bootstrapvz/plugins/vagrant/tasks.py +++ b/bootstrapvz/plugins/vagrant/tasks.py @@ -8,225 +8,225 @@ assets = os.path.normpath(os.path.join(os.path.dirname(__file__), 'assets')) class CheckBoxPath(Task): - description = 'Checking if the vagrant box file already exists' - phase = phases.preparation + description = 'Checking if the vagrant box file already exists' + phase = phases.preparation - @classmethod - def run(cls, info): - box_basename = info.manifest.name.format(**info.manifest_vars) - box_name = box_basename + '.box' - box_path = os.path.join(info.manifest.bootstrapper['workspace'], box_name) - if os.path.exists(box_path): - from bootstrapvz.common.exceptions import TaskError - msg = 'The vagrant box `{name}\' already exists at `{path}\''.format(name=box_name, path=box_path) - raise TaskError(msg) - info._vagrant['box_name'] = box_name - info._vagrant['box_path'] = box_path + @classmethod + def run(cls, info): + box_basename = info.manifest.name.format(**info.manifest_vars) + box_name = box_basename + '.box' + box_path = os.path.join(info.manifest.bootstrapper['workspace'], box_name) + if os.path.exists(box_path): + from bootstrapvz.common.exceptions import TaskError + msg = 'The vagrant box `{name}\' already exists at `{path}\''.format(name=box_name, path=box_path) + raise TaskError(msg) + info._vagrant['box_name'] = box_name + info._vagrant['box_path'] = box_path class CreateVagrantBoxDir(Task): - description = 'Creating directory for the vagrant box' - phase = phases.preparation - predecessors = [workspace.CreateWorkspace, CheckBoxPath] + description = 'Creating directory for the vagrant box' + phase = phases.preparation + predecessors = [workspace.CreateWorkspace, CheckBoxPath] - @classmethod - def run(cls, info): - info._vagrant['folder'] = os.path.join(info.workspace, 'vagrant') - os.mkdir(info._vagrant['folder']) + @classmethod + def 
run(cls, info): + info._vagrant['folder'] = os.path.join(info.workspace, 'vagrant') + os.mkdir(info._vagrant['folder']) class AddPackages(Task): - description = 'Add packages that vagrant depends on' - phase = phases.preparation + description = 'Add packages that vagrant depends on' + phase = phases.preparation - @classmethod - def run(cls, info): - info.packages.add('openssh-server') - info.packages.add('sudo') - info.packages.add('nfs-client') + @classmethod + def run(cls, info): + info.packages.add('openssh-server') + info.packages.add('sudo') + info.packages.add('nfs-client') class CreateVagrantUser(Task): - description = 'Creating the vagrant user' - phase = phases.system_modification + description = 'Creating the vagrant user' + phase = phases.system_modification - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import log_check_call - log_check_call(['chroot', info.root, - 'useradd', - '--create-home', '--shell', '/bin/bash', - 'vagrant']) + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import log_check_call + log_check_call(['chroot', info.root, + 'useradd', + '--create-home', '--shell', '/bin/bash', + 'vagrant']) class PasswordlessSudo(Task): - description = 'Allowing the vagrant user to use sudo without a password' - phase = phases.system_modification + description = 'Allowing the vagrant user to use sudo without a password' + phase = phases.system_modification - @classmethod - def run(cls, info): - sudo_vagrant_path = os.path.join(info.root, 'etc/sudoers.d/vagrant') - with open(sudo_vagrant_path, 'w') as sudo_vagrant: - sudo_vagrant.write('vagrant ALL=(ALL) NOPASSWD:ALL') - import stat - ug_read_only = (stat.S_IRUSR | stat.S_IRGRP) - os.chmod(sudo_vagrant_path, ug_read_only) + @classmethod + def run(cls, info): + sudo_vagrant_path = os.path.join(info.root, 'etc/sudoers.d/vagrant') + with open(sudo_vagrant_path, 'w') as sudo_vagrant: + sudo_vagrant.write('vagrant ALL=(ALL) NOPASSWD:ALL') + import stat + ug_read_only 
= (stat.S_IRUSR | stat.S_IRGRP) + os.chmod(sudo_vagrant_path, ug_read_only) class AddInsecurePublicKey(Task): - description = 'Adding vagrant insecure public key' - phase = phases.system_modification - predecessors = [CreateVagrantUser] + description = 'Adding vagrant insecure public key' + phase = phases.system_modification + predecessors = [CreateVagrantUser] - @classmethod - def run(cls, info): - ssh_dir = os.path.join(info.root, 'home/vagrant/.ssh') - os.mkdir(ssh_dir) + @classmethod + def run(cls, info): + ssh_dir = os.path.join(info.root, 'home/vagrant/.ssh') + os.mkdir(ssh_dir) - authorized_keys_source_path = os.path.join(assets, 'authorized_keys') - with open(authorized_keys_source_path, 'r') as authorized_keys_source: - insecure_public_key = authorized_keys_source.read() + authorized_keys_source_path = os.path.join(assets, 'authorized_keys') + with open(authorized_keys_source_path, 'r') as authorized_keys_source: + insecure_public_key = authorized_keys_source.read() - authorized_keys_path = os.path.join(ssh_dir, 'authorized_keys') - with open(authorized_keys_path, 'a') as authorized_keys: - authorized_keys.write(insecure_public_key) + authorized_keys_path = os.path.join(ssh_dir, 'authorized_keys') + with open(authorized_keys_path, 'a') as authorized_keys: + authorized_keys.write(insecure_public_key) - import stat - os.chmod(ssh_dir, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) - os.chmod(authorized_keys_path, stat.S_IRUSR | stat.S_IWUSR) + import stat + os.chmod(ssh_dir, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) + os.chmod(authorized_keys_path, stat.S_IRUSR | stat.S_IWUSR) - # We can't do this directly with python, since getpwnam gets its info from the host - from bootstrapvz.common.tools import log_check_call - log_check_call(['chroot', info.root, - 'chown', 'vagrant:vagrant', - '/home/vagrant/.ssh', '/home/vagrant/.ssh/authorized_keys']) + # We can't do this directly with python, since getpwnam gets its info from the host + from bootstrapvz.common.tools 
import log_check_call + log_check_call(['chroot', info.root, + 'chown', 'vagrant:vagrant', + '/home/vagrant/.ssh', '/home/vagrant/.ssh/authorized_keys']) class SetRootPassword(Task): - description = 'Setting the root password to `vagrant\'' - phase = phases.system_modification + description = 'Setting the root password to `vagrant\'' + phase = phases.system_modification - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import log_check_call - log_check_call(['chroot', info.root, 'chpasswd'], 'root:vagrant') + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import log_check_call + log_check_call(['chroot', info.root, 'chpasswd'], 'root:vagrant') class PackageBox(Task): - description = 'Packaging the volume as a vagrant box' - phase = phases.image_registration + description = 'Packaging the volume as a vagrant box' + phase = phases.image_registration - @classmethod - def run(cls, info): - vagrantfile_source = os.path.join(assets, 'Vagrantfile') - vagrantfile = os.path.join(info._vagrant['folder'], 'Vagrantfile') - shutil.copy(vagrantfile_source, vagrantfile) + @classmethod + def run(cls, info): + vagrantfile_source = os.path.join(assets, 'Vagrantfile') + vagrantfile = os.path.join(info._vagrant['folder'], 'Vagrantfile') + shutil.copy(vagrantfile_source, vagrantfile) - import random - mac_address = '080027{mac:06X}'.format(mac=random.randrange(16 ** 6)) - from bootstrapvz.common.tools import sed_i - sed_i(vagrantfile, '\\[MAC_ADDRESS\\]', mac_address) + import random + mac_address = '080027{mac:06X}'.format(mac=random.randrange(16 ** 6)) + from bootstrapvz.common.tools import sed_i + sed_i(vagrantfile, '\\[MAC_ADDRESS\\]', mac_address) - metadata_source = os.path.join(assets, 'metadata.json') - metadata = os.path.join(info._vagrant['folder'], 'metadata.json') - shutil.copy(metadata_source, metadata) + metadata_source = os.path.join(assets, 'metadata.json') + metadata = os.path.join(info._vagrant['folder'], 'metadata.json') + 
shutil.copy(metadata_source, metadata) - from bootstrapvz.common.tools import log_check_call - disk_name = 'box-disk1.' + info.volume.extension - disk_link = os.path.join(info._vagrant['folder'], disk_name) - log_check_call(['ln', '-s', info.volume.image_path, disk_link]) + from bootstrapvz.common.tools import log_check_call + disk_name = 'box-disk1.' + info.volume.extension + disk_link = os.path.join(info._vagrant['folder'], disk_name) + log_check_call(['ln', '-s', info.volume.image_path, disk_link]) - ovf_path = os.path.join(info._vagrant['folder'], 'box.ovf') - cls.write_ovf(info, ovf_path, mac_address, disk_name) + ovf_path = os.path.join(info._vagrant['folder'], 'box.ovf') + cls.write_ovf(info, ovf_path, mac_address, disk_name) - box_files = os.listdir(info._vagrant['folder']) - log_check_call(['tar', '--create', '--gzip', '--dereference', - '--file', info._vagrant['box_path'], - '--directory', info._vagrant['folder']] + box_files - ) - import logging - logging.getLogger(__name__).info('The vagrant box has been placed at ' + info._vagrant['box_path']) + box_files = os.listdir(info._vagrant['folder']) + log_check_call(['tar', '--create', '--gzip', '--dereference', + '--file', info._vagrant['box_path'], + '--directory', info._vagrant['folder']] + box_files + ) + import logging + logging.getLogger(__name__).info('The vagrant box has been placed at ' + info._vagrant['box_path']) - @classmethod - def write_ovf(cls, info, destination, mac_address, disk_name): - namespaces = {'ovf': 'http://schemas.dmtf.org/ovf/envelope/1', - 'rasd': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData', - 'vssd': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData', - 'xsi': 'http://www.w3.org/2001/XMLSchema-instance', - 'vbox': 'http://www.virtualbox.org/ovf/machine', - } + @classmethod + def write_ovf(cls, info, destination, mac_address, disk_name): + namespaces = {'ovf': 'http://schemas.dmtf.org/ovf/envelope/1', + 
'rasd': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData', + 'vssd': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData', + 'xsi': 'http://www.w3.org/2001/XMLSchema-instance', + 'vbox': 'http://www.virtualbox.org/ovf/machine', + } - def attr(element, name, value=None): - for prefix, ns in namespaces.iteritems(): - name = name.replace(prefix + ':', '{' + ns + '}') - if value is None: - return element.attrib[name] - else: - element.attrib[name] = str(value) + def attr(element, name, value=None): + for prefix, ns in namespaces.iteritems(): + name = name.replace(prefix + ':', '{' + ns + '}') + if value is None: + return element.attrib[name] + else: + element.attrib[name] = str(value) - template_path = os.path.join(assets, 'box.ovf') - import xml.etree.ElementTree as ET - template = ET.parse(template_path) - root = template.getroot() + template_path = os.path.join(assets, 'box.ovf') + import xml.etree.ElementTree as ET + template = ET.parse(template_path) + root = template.getroot() - [disk_ref] = root.findall('./ovf:References/ovf:File', namespaces) - attr(disk_ref, 'ovf:href', disk_name) + [disk_ref] = root.findall('./ovf:References/ovf:File', namespaces) + attr(disk_ref, 'ovf:href', disk_name) - # List of OVF disk format URIs - # Snatched from VBox source (src/VBox/Main/src-server/ApplianceImpl.cpp:47) - # ISOURI = "http://www.ecma-international.org/publications/standards/Ecma-119.htm" - # VMDKStreamURI = "http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" - # VMDKSparseURI = "http://www.vmware.com/specifications/vmdk.html#sparse" - # VMDKCompressedURI = "http://www.vmware.com/specifications/vmdk.html#compressed" - # VMDKCompressedURI2 = "http://www.vmware.com/interfaces/specifications/vmdk.html#compressed" - # VHDURI = "http://go.microsoft.com/fwlink/?LinkId=137171" - volume_uuid = info.volume.get_uuid() - [disk] = root.findall('./ovf:DiskSection/ovf:Disk', namespaces) - 
attr(disk, 'ovf:capacity', info.volume.size.bytes.get_qty_in('B')) - attr(disk, 'ovf:format', info.volume.ovf_uri) - attr(disk, 'vbox:uuid', volume_uuid) + # List of OVF disk format URIs + # Snatched from VBox source (src/VBox/Main/src-server/ApplianceImpl.cpp:47) + # ISOURI = "http://www.ecma-international.org/publications/standards/Ecma-119.htm" + # VMDKStreamURI = "http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" + # VMDKSparseURI = "http://www.vmware.com/specifications/vmdk.html#sparse" + # VMDKCompressedURI = "http://www.vmware.com/specifications/vmdk.html#compressed" + # VMDKCompressedURI2 = "http://www.vmware.com/interfaces/specifications/vmdk.html#compressed" + # VHDURI = "http://go.microsoft.com/fwlink/?LinkId=137171" + volume_uuid = info.volume.get_uuid() + [disk] = root.findall('./ovf:DiskSection/ovf:Disk', namespaces) + attr(disk, 'ovf:capacity', info.volume.size.bytes.get_qty_in('B')) + attr(disk, 'ovf:format', info.volume.ovf_uri) + attr(disk, 'vbox:uuid', volume_uuid) - [system] = root.findall('./ovf:VirtualSystem', namespaces) - attr(system, 'ovf:id', info._vagrant['box_name']) + [system] = root.findall('./ovf:VirtualSystem', namespaces) + attr(system, 'ovf:id', info._vagrant['box_name']) - # Set the operating system - [os_section] = system.findall('./ovf:OperatingSystemSection', namespaces) - os_info = {'i386': {'id': 96, 'name': 'Debian'}, - 'amd64': {'id': 96, 'name': 'Debian_64'} - }.get(info.manifest.system['architecture']) - attr(os_section, 'ovf:id', os_info['id']) - [os_desc] = os_section.findall('./ovf:Description', namespaces) - os_desc.text = os_info['name'] - [os_type] = os_section.findall('./vbox:OSType', namespaces) - os_type.text = os_info['name'] + # Set the operating system + [os_section] = system.findall('./ovf:OperatingSystemSection', namespaces) + os_info = {'i386': {'id': 96, 'name': 'Debian'}, + 'amd64': {'id': 96, 'name': 'Debian_64'} + }.get(info.manifest.system['architecture']) + attr(os_section, 
'ovf:id', os_info['id']) + [os_desc] = os_section.findall('./ovf:Description', namespaces) + os_desc.text = os_info['name'] + [os_type] = os_section.findall('./vbox:OSType', namespaces) + os_type.text = os_info['name'] - [sysid] = system.findall('./ovf:VirtualHardwareSection/ovf:System/' - 'vssd:VirtualSystemIdentifier', namespaces) - sysid.text = info._vagrant['box_name'] + [sysid] = system.findall('./ovf:VirtualHardwareSection/ovf:System/' + 'vssd:VirtualSystemIdentifier', namespaces) + sysid.text = info._vagrant['box_name'] - [machine] = system.findall('./vbox:Machine', namespaces) - import uuid - attr(machine, 'ovf:uuid', uuid.uuid4()) - attr(machine, 'ovf:name', info._vagrant['box_name']) - from datetime import datetime - attr(machine, 'ovf:lastStateChange', datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')) - [nic] = machine.findall('./ovf:Hardware/ovf:Network/ovf:Adapter', namespaces) - attr(machine, 'ovf:MACAddress', mac_address) + [machine] = system.findall('./vbox:Machine', namespaces) + import uuid + attr(machine, 'ovf:uuid', uuid.uuid4()) + attr(machine, 'ovf:name', info._vagrant['box_name']) + from datetime import datetime + attr(machine, 'ovf:lastStateChange', datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')) + [nic] = machine.findall('./ovf:Hardware/ovf:Network/ovf:Adapter', namespaces) + attr(machine, 'ovf:MACAddress', mac_address) - [device_img] = machine.findall('./ovf:StorageControllers' - '/ovf:StorageController[@name="SATA Controller"]' - '/ovf:AttachedDevice/ovf:Image', namespaces) - attr(device_img, 'uuid', '{' + str(volume_uuid) + '}') + [device_img] = machine.findall('./ovf:StorageControllers' + '/ovf:StorageController[@name="SATA Controller"]' + '/ovf:AttachedDevice/ovf:Image', namespaces) + attr(device_img, 'uuid', '{' + str(volume_uuid) + '}') - template.write(destination, xml_declaration=True) # , default_namespace=namespaces['ovf'] + template.write(destination, xml_declaration=True) # , default_namespace=namespaces['ovf'] class 
RemoveVagrantBoxDir(Task): - description = 'Removing the vagrant box directory' - phase = phases.cleaning - successors = [workspace.DeleteWorkspace] + description = 'Removing the vagrant box directory' + phase = phases.cleaning + successors = [workspace.DeleteWorkspace] - @classmethod - def run(cls, info): - shutil.rmtree(info._vagrant['folder']) - del info._vagrant['folder'] + @classmethod + def run(cls, info): + shutil.rmtree(info._vagrant['folder']) + del info._vagrant['folder'] diff --git a/bootstrapvz/providers/azure/__init__.py b/bootstrapvz/providers/azure/__init__.py index 3bac007..aa06af0 100644 --- a/bootstrapvz/providers/azure/__init__.py +++ b/bootstrapvz/providers/azure/__init__.py @@ -9,27 +9,27 @@ from bootstrapvz.common.tasks import apt def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - taskset.update(task_groups.get_standard_groups(manifest)) - taskset.update([apt.AddBackports, - tasks.packages.DefaultPackages, - loopback.AddRequiredCommands, - loopback.Create, - image.MoveImage, - initd.InstallInitScripts, - ssh.AddOpenSSHPackage, - ssh.ShredHostkeys, - ssh.AddSSHKeyGeneration, - tasks.packages.Waagent, - tasks.boot.ConfigureGrub, - tasks.boot.PatchUdev, - ]) + taskset.update(task_groups.get_standard_groups(manifest)) + taskset.update([apt.AddBackports, + tasks.packages.DefaultPackages, + loopback.AddRequiredCommands, + loopback.Create, + image.MoveImage, + initd.InstallInitScripts, + ssh.AddOpenSSHPackage, + ssh.ShredHostkeys, + ssh.AddSSHKeyGeneration, + tasks.packages.Waagent, + tasks.boot.ConfigureGrub, + tasks.boot.PatchUdev, + ]) def resolve_rollback_tasks(taskset, manifest, completed, counter_task): - 
taskset.update(task_groups.get_standard_rollback_tasks(completed)) + taskset.update(task_groups.get_standard_rollback_tasks(completed)) diff --git a/bootstrapvz/providers/azure/tasks/boot.py b/bootstrapvz/providers/azure/tasks/boot.py index 892a842..9d7e6a8 100644 --- a/bootstrapvz/providers/azure/tasks/boot.py +++ b/bootstrapvz/providers/azure/tasks/boot.py @@ -6,31 +6,31 @@ import os class PatchUdev(Task): - description = 'Patching udev configuration to remove ROOTDELAY sleep' - phase = phases.system_modification - successors = [kernel.UpdateInitramfs] + description = 'Patching udev configuration to remove ROOTDELAY sleep' + phase = phases.system_modification + successors = [kernel.UpdateInitramfs] - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import log_check_call - from . import assets - # c.f. http://anonscm.debian.org/cgit/pkg-systemd/systemd.git/commit/?id=61e055638cea - udev_file = os.path.join(info.root, 'usr/share/initramfs-tools/scripts/init-top/udev') - diff_file = os.path.join(assets, 'udev.diff') - log_check_call(['patch', '--no-backup-if-mismatch', udev_file, diff_file]) + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import log_check_call + from . import assets + # c.f. 
http://anonscm.debian.org/cgit/pkg-systemd/systemd.git/commit/?id=61e055638cea + udev_file = os.path.join(info.root, 'usr/share/initramfs-tools/scripts/init-top/udev') + diff_file = os.path.join(assets, 'udev.diff') + log_check_call(['patch', '--no-backup-if-mismatch', udev_file, diff_file]) class ConfigureGrub(Task): - description = 'Change grub configuration to allow for ttyS0 output' - phase = phases.system_modification - predecessors = [grub.ConfigureGrub] - successors = [grub.InstallGrub_1_99, grub.InstallGrub_2] + description = 'Change grub configuration to allow for ttyS0 output' + phase = phases.system_modification + predecessors = [grub.ConfigureGrub] + successors = [grub.InstallGrub_1_99, grub.InstallGrub_2] - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import sed_i - grub_config = os.path.join(info.root, 'etc/default/grub') - sed_i(grub_config, r'^(GRUB_CMDLINE_LINUX_DEFAULT=.*)', r'GRUB_CMDLINE_LINUX_DEFAULT=""') - sed_i(grub_config, r'^(GRUB_CMDLINE_LINUX*=".*)"\s*$', r'\1console=tty0 console=ttyS0,115200n8 earlyprintk=ttyS0,115200 rootdelay=300"') - sed_i(grub_config, r'^(GRUB_HIDDEN_TIMEOUT=).*', r'#GRUB_HIDDEN_TIMEOUT=true') - sed_i(grub_config, r'^.*(GRUB_TIMEOUT=).*$', r'GRUB_TIMEOUT=5') + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import sed_i + grub_config = os.path.join(info.root, 'etc/default/grub') + sed_i(grub_config, r'^(GRUB_CMDLINE_LINUX_DEFAULT=.*)', r'GRUB_CMDLINE_LINUX_DEFAULT=""') + sed_i(grub_config, r'^(GRUB_CMDLINE_LINUX*=".*)"\s*$', r'\1console=tty0 console=ttyS0,115200n8 earlyprintk=ttyS0,115200 rootdelay=300"') + sed_i(grub_config, r'^(GRUB_HIDDEN_TIMEOUT=).*', r'#GRUB_HIDDEN_TIMEOUT=true') + sed_i(grub_config, r'^.*(GRUB_TIMEOUT=).*$', r'GRUB_TIMEOUT=5') diff --git a/bootstrapvz/providers/azure/tasks/packages.py b/bootstrapvz/providers/azure/tasks/packages.py index 2089404..cd8f6d6 100644 --- a/bootstrapvz/providers/azure/tasks/packages.py +++ 
b/bootstrapvz/providers/azure/tasks/packages.py @@ -4,52 +4,52 @@ from bootstrapvz.common.tasks.packages import InstallPackages class DefaultPackages(Task): - description = 'Adding image packages required for Azure' - phase = phases.preparation + description = 'Adding image packages required for Azure' + phase = phases.preparation - @classmethod - def run(cls, info): - info.packages.add('openssl') - info.packages.add('python-openssl') - info.packages.add('python-pyasn1') - info.packages.add('sudo') - info.packages.add('parted') + @classmethod + def run(cls, info): + info.packages.add('openssl') + info.packages.add('python-openssl') + info.packages.add('python-pyasn1') + info.packages.add('sudo') + info.packages.add('parted') - import os.path - kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.yml') - from bootstrapvz.common.tools import config_get - kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, - info.manifest.system['architecture']]) - info.packages.add(kernel_package) + import os.path + kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.yml') + from bootstrapvz.common.tools import config_get + kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, + info.manifest.system['architecture']]) + info.packages.add(kernel_package) class Waagent(Task): - description = 'Add waagent' - phase = phases.package_installation - predecessors = [InstallPackages] + description = 'Add waagent' + phase = phases.package_installation + predecessors = [InstallPackages] - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import log_check_call - import os - waagent_version = info.manifest.provider['waagent']['version'] - waagent_file = 'WALinuxAgent-' + waagent_version + '.tar.gz' - waagent_url = 'https://github.com/Azure/WALinuxAgent/archive/' + waagent_file - log_check_call(['wget', '-P', info.root, waagent_url]) - waagent_directory = 
os.path.join(info.root, 'root') - log_check_call(['tar', 'xaf', os.path.join(info.root, waagent_file), '-C', waagent_directory]) - os.remove(os.path.join(info.root, waagent_file)) - waagent_script = '/root/WALinuxAgent-WALinuxAgent-' + waagent_version + '/waagent' - log_check_call(['chroot', info.root, 'cp', waagent_script, '/usr/sbin/waagent']) - log_check_call(['chroot', info.root, 'chmod', '755', '/usr/sbin/waagent']) - log_check_call(['chroot', info.root, 'waagent', '-install']) - if info.manifest.provider['waagent'].get('conf', False): - if os.path.isfile(info.manifest.provider['waagent']['conf']): - log_check_call(['cp', info.manifest.provider['waagent']['conf'], - os.path.join(info.root, 'etc/waagent.conf')]) + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import log_check_call + import os + waagent_version = info.manifest.provider['waagent']['version'] + waagent_file = 'WALinuxAgent-' + waagent_version + '.tar.gz' + waagent_url = 'https://github.com/Azure/WALinuxAgent/archive/' + waagent_file + log_check_call(['wget', '-P', info.root, waagent_url]) + waagent_directory = os.path.join(info.root, 'root') + log_check_call(['tar', 'xaf', os.path.join(info.root, waagent_file), '-C', waagent_directory]) + os.remove(os.path.join(info.root, waagent_file)) + waagent_script = '/root/WALinuxAgent-WALinuxAgent-' + waagent_version + '/waagent' + log_check_call(['chroot', info.root, 'cp', waagent_script, '/usr/sbin/waagent']) + log_check_call(['chroot', info.root, 'chmod', '755', '/usr/sbin/waagent']) + log_check_call(['chroot', info.root, 'waagent', '-install']) + if info.manifest.provider['waagent'].get('conf', False): + if os.path.isfile(info.manifest.provider['waagent']['conf']): + log_check_call(['cp', info.manifest.provider['waagent']['conf'], + os.path.join(info.root, 'etc/waagent.conf')]) - # The Azure Linux agent uses 'useradd' to add users, but SHELL - # is set to /bin/sh by default. Set this to /bin/bash instead. 
- from bootstrapvz.common.tools import sed_i - useradd_config = os.path.join(info.root, 'etc/default/useradd') - sed_i(useradd_config, r'^(SHELL=.*)', r'SHELL=/bin/bash') + # The Azure Linux agent uses 'useradd' to add users, but SHELL + # is set to /bin/sh by default. Set this to /bin/bash instead. + from bootstrapvz.common.tools import sed_i + useradd_config = os.path.join(info.root, 'etc/default/useradd') + sed_i(useradd_config, r'^(SHELL=.*)', r'SHELL=/bin/bash') diff --git a/bootstrapvz/providers/docker/__init__.py b/bootstrapvz/providers/docker/__init__.py index d21fb3c..8394de9 100644 --- a/bootstrapvz/providers/docker/__init__.py +++ b/bootstrapvz/providers/docker/__init__.py @@ -6,33 +6,33 @@ import tasks.image def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - taskset.update(task_groups.get_base_group(manifest)) - taskset.update([folder.Create, - filesystem.CopyMountTable, - filesystem.RemoveMountTable, - folder.Delete, - ]) - taskset.update(task_groups.get_network_group(manifest)) - taskset.update(task_groups.get_apt_group(manifest)) - taskset.update(task_groups.get_locale_group(manifest)) - taskset.update(task_groups.security_group) - taskset.update(task_groups.cleanup_group) + taskset.update(task_groups.get_base_group(manifest)) + taskset.update([folder.Create, + filesystem.CopyMountTable, + filesystem.RemoveMountTable, + folder.Delete, + ]) + taskset.update(task_groups.get_network_group(manifest)) + taskset.update(task_groups.get_apt_group(manifest)) + taskset.update(task_groups.get_locale_group(manifest)) + taskset.update(task_groups.security_group) + taskset.update(task_groups.cleanup_group) - 
taskset.update([tasks.commands.AddRequiredCommands, - tasks.image.CreateDockerfileEntry, - tasks.image.CreateImage, - ]) - if 'labels' in manifest.provider: - taskset.add(tasks.image.PopulateLabels) - if 'dockerfile' in manifest.provider: - taskset.add(tasks.image.AppendManifestDockerfile) + taskset.update([tasks.commands.AddRequiredCommands, + tasks.image.CreateDockerfileEntry, + tasks.image.CreateImage, + ]) + if 'labels' in manifest.provider: + taskset.add(tasks.image.PopulateLabels) + if 'dockerfile' in manifest.provider: + taskset.add(tasks.image.AppendManifestDockerfile) def resolve_rollback_tasks(taskset, manifest, completed, counter_task): - taskset.update(task_groups.get_standard_rollback_tasks(completed)) + taskset.update(task_groups.get_standard_rollback_tasks(completed)) diff --git a/bootstrapvz/providers/docker/tasks/commands.py b/bootstrapvz/providers/docker/tasks/commands.py index 2ee74c2..5f1827c 100644 --- a/bootstrapvz/providers/docker/tasks/commands.py +++ b/bootstrapvz/providers/docker/tasks/commands.py @@ -4,10 +4,10 @@ from bootstrapvz.common.tasks import host class AddRequiredCommands(Task): - description = 'Adding commands required for docker' - phase = phases.preparation - successors = [host.CheckExternalCommands] + description = 'Adding commands required for docker' + phase = phases.preparation + successors = [host.CheckExternalCommands] - @classmethod - def run(cls, info): - info.host_dependencies['docker'] = 'docker.io' + @classmethod + def run(cls, info): + info.host_dependencies['docker'] = 'docker.io' diff --git a/bootstrapvz/providers/docker/tasks/image.py b/bootstrapvz/providers/docker/tasks/image.py index 6f41f3a..6093327 100644 --- a/bootstrapvz/providers/docker/tasks/image.py +++ b/bootstrapvz/providers/docker/tasks/image.py @@ -4,68 +4,68 @@ from bootstrapvz.common.tools import log_check_call class CreateDockerfileEntry(Task): - description = 'Creating the Dockerfile entry' - phase = phases.preparation + description = 'Creating 
the Dockerfile entry' + phase = phases.preparation - @classmethod - def run(cls, info): - info._docker['dockerfile'] = '' + @classmethod + def run(cls, info): + info._docker['dockerfile'] = '' class CreateImage(Task): - description = 'Creating docker image' - phase = phases.image_registration + description = 'Creating docker image' + phase = phases.image_registration - @classmethod - def run(cls, info): - from pipes import quote - tar_cmd = ['tar', '--create', '--numeric-owner', - '--directory', info.volume.path, '.'] - docker_cmd = ['docker', 'import', '--change', info._docker['dockerfile'], '-', - info.manifest.name.format(**info.manifest_vars)] - cmd = ' '.join(map(quote, tar_cmd)) + ' | ' + ' '.join(map(quote, docker_cmd)) - [info._docker['image_id']] = log_check_call([cmd], shell=True) + @classmethod + def run(cls, info): + from pipes import quote + tar_cmd = ['tar', '--create', '--numeric-owner', + '--directory', info.volume.path, '.'] + docker_cmd = ['docker', 'import', '--change', info._docker['dockerfile'], '-', + info.manifest.name.format(**info.manifest_vars)] + cmd = ' '.join(map(quote, tar_cmd)) + ' | ' + ' '.join(map(quote, docker_cmd)) + [info._docker['image_id']] = log_check_call([cmd], shell=True) class PopulateLabels(Task): - description = 'Populating docker labels' - phase = phases.image_registration - successors = [CreateImage] + description = 'Populating docker labels' + phase = phases.image_registration + successors = [CreateImage] - @classmethod - def run(cls, info): - import pyrfc3339 - from datetime import datetime - import pytz - labels = {} - labels['name'] = info.manifest.name.format(**info.manifest_vars) - # Inspired by https://github.com/projectatomic/ContainerApplicationGenericLabels - # See here for the discussion on the debian-cloud mailing list - # https://lists.debian.org/debian-cloud/2015/05/msg00071.html - labels['architecture'] = info.manifest.system['architecture'] - labels['build-date'] = 
pyrfc3339.generate(datetime.utcnow().replace(tzinfo=pytz.utc)) - if 'labels' in info.manifest.provider: - for label, value in info.manifest.provider['labels'].items(): - labels[label] = value.format(**info.manifest_vars) + @classmethod + def run(cls, info): + import pyrfc3339 + from datetime import datetime + import pytz + labels = {} + labels['name'] = info.manifest.name.format(**info.manifest_vars) + # Inspired by https://github.com/projectatomic/ContainerApplicationGenericLabels + # See here for the discussion on the debian-cloud mailing list + # https://lists.debian.org/debian-cloud/2015/05/msg00071.html + labels['architecture'] = info.manifest.system['architecture'] + labels['build-date'] = pyrfc3339.generate(datetime.utcnow().replace(tzinfo=pytz.utc)) + if 'labels' in info.manifest.provider: + for label, value in info.manifest.provider['labels'].items(): + labels[label] = value.format(**info.manifest_vars) - # pipes.quote converts newlines into \n rather than just prefixing - # it with a backslash, so we need to escape manually - def escape(value): - value = value.replace('"', '\\"') - value = value.replace('\n', '\\\n') - value = '"' + value + '"' - return value - kv_pairs = [label + '=' + escape(value) for label, value in labels.items()] - # Add some nice newlines and indentation - info._docker['dockerfile'] += 'LABEL ' + ' \\\n '.join(kv_pairs) + '\n' + # pipes.quote converts newlines into \n rather than just prefixing + # it with a backslash, so we need to escape manually + def escape(value): + value = value.replace('"', '\\"') + value = value.replace('\n', '\\\n') + value = '"' + value + '"' + return value + kv_pairs = [label + '=' + escape(value) for label, value in labels.items()] + # Add some nice newlines and indentation + info._docker['dockerfile'] += 'LABEL ' + ' \\\n '.join(kv_pairs) + '\n' class AppendManifestDockerfile(Task): - description = 'Appending Dockerfile instructions from the manifest' - phase = phases.image_registration - predecessors 
= [PopulateLabels] - successors = [CreateImage] + description = 'Appending Dockerfile instructions from the manifest' + phase = phases.image_registration + predecessors = [PopulateLabels] + successors = [CreateImage] - @classmethod - def run(cls, info): - info._docker['dockerfile'] += info.manifest.provider['dockerfile'] + '\n' + @classmethod + def run(cls, info): + info._docker['dockerfile'] += info.manifest.provider['dockerfile'] + '\n' diff --git a/bootstrapvz/providers/ec2/__init__.py b/bootstrapvz/providers/ec2/__init__.py index 5fb2cee..67ce08f 100644 --- a/bootstrapvz/providers/ec2/__init__.py +++ b/bootstrapvz/providers/ec2/__init__.py @@ -18,114 +18,114 @@ from bootstrapvz.common.tasks import kernel def validate_manifest(data, validator, error): - import os.path - validator(data, os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + import os.path + validator(data, os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - from bootstrapvz.common.bytes import Bytes - if data['volume']['backing'] == 'ebs': - volume_size = Bytes(0) - for key, partition in data['volume']['partitions'].iteritems(): - if key != 'type': - volume_size += Bytes(partition['size']) - if int(volume_size % Bytes('1GiB')) != 0: - msg = ('The volume size must be a multiple of 1GiB when using EBS backing') - error(msg, ['volume', 'partitions']) - else: - validator(data, os.path.join(os.path.dirname(__file__), 'manifest-schema-s3.yml')) + from bootstrapvz.common.bytes import Bytes + if data['volume']['backing'] == 'ebs': + volume_size = Bytes(0) + for key, partition in data['volume']['partitions'].iteritems(): + if key != 'type': + volume_size += Bytes(partition['size']) + if int(volume_size % Bytes('1GiB')) != 0: + msg = ('The volume size must be a multiple of 1GiB when using EBS backing') + error(msg, ['volume', 'partitions']) + else: + validator(data, os.path.join(os.path.dirname(__file__), 'manifest-schema-s3.yml')) - bootloader = data['system']['bootloader'] - 
virtualization = data['provider']['virtualization'] - backing = data['volume']['backing'] - partition_type = data['volume']['partitions']['type'] - enhanced_networking = data['provider']['enhanced_networking'] if 'enhanced_networking' in data['provider'] else None + bootloader = data['system']['bootloader'] + virtualization = data['provider']['virtualization'] + backing = data['volume']['backing'] + partition_type = data['volume']['partitions']['type'] + enhanced_networking = data['provider']['enhanced_networking'] if 'enhanced_networking' in data['provider'] else None - if virtualization == 'pvm' and bootloader != 'pvgrub': - error('Paravirtualized AMIs only support pvgrub as a bootloader', ['system', 'bootloader']) + if virtualization == 'pvm' and bootloader != 'pvgrub': + error('Paravirtualized AMIs only support pvgrub as a bootloader', ['system', 'bootloader']) - if backing != 'ebs' and virtualization == 'hvm': - error('HVM AMIs currently only work when they are EBS backed', ['volume', 'backing']) + if backing != 'ebs' and virtualization == 'hvm': + error('HVM AMIs currently only work when they are EBS backed', ['volume', 'backing']) - if backing == 's3' and partition_type != 'none': - error('S3 backed AMIs currently only work with unpartitioned volumes', ['system', 'bootloader']) + if backing == 's3' and partition_type != 'none': + error('S3 backed AMIs currently only work with unpartitioned volumes', ['system', 'bootloader']) - if enhanced_networking == 'simple' and virtualization != 'hvm': - error('Enhanced networking only works with HVM virtualization', ['provider', 'virtualization']) + if enhanced_networking == 'simple' and virtualization != 'hvm': + error('Enhanced networking only works with HVM virtualization', ['provider', 'virtualization']) def resolve_tasks(taskset, manifest): - from bootstrapvz.common.releases import wheezy, jessie + from bootstrapvz.common.releases import wheezy, jessie - taskset.update(task_groups.get_standard_groups(manifest)) - 
taskset.update(task_groups.ssh_group) + taskset.update(task_groups.get_standard_groups(manifest)) + taskset.update(task_groups.ssh_group) - taskset.update([tasks.host.AddExternalCommands, - tasks.packages.DefaultPackages, - tasks.connection.SilenceBotoDebug, - tasks.connection.GetCredentials, - tasks.ami.AMIName, - tasks.connection.Connect, + taskset.update([tasks.host.AddExternalCommands, + tasks.packages.DefaultPackages, + tasks.connection.SilenceBotoDebug, + tasks.connection.GetCredentials, + tasks.ami.AMIName, + tasks.connection.Connect, - boot.BlackListModules, - boot.DisableGetTTYs, - initd.AddExpandRoot, - initd.RemoveHWClock, - initd.InstallInitScripts, - tasks.ami.RegisterAMI, - ]) + boot.BlackListModules, + boot.DisableGetTTYs, + initd.AddExpandRoot, + initd.RemoveHWClock, + initd.InstallInitScripts, + tasks.ami.RegisterAMI, + ]) - if manifest.release > wheezy: - taskset.add(tasks.network.InstallNetworkingUDevHotplugAndDHCPSubinterface) + if manifest.release > wheezy: + taskset.add(tasks.network.InstallNetworkingUDevHotplugAndDHCPSubinterface) - if manifest.release <= wheezy: - # The default DHCP client `isc-dhcp' doesn't work properly on wheezy and earlier - taskset.add(tasks.network.InstallDHCPCD) - taskset.add(tasks.network.EnableDHCPCDDNS) + if manifest.release <= wheezy: + # The default DHCP client `isc-dhcp' doesn't work properly on wheezy and earlier + taskset.add(tasks.network.InstallDHCPCD) + taskset.add(tasks.network.EnableDHCPCDDNS) - if manifest.release >= jessie: - taskset.add(tasks.packages.AddWorkaroundGrowpart) - taskset.add(initd.AdjustGrowpartWorkaround) + if manifest.release >= jessie: + taskset.add(tasks.packages.AddWorkaroundGrowpart) + taskset.add(initd.AdjustGrowpartWorkaround) - if manifest.provider.get('install_init_scripts', True): - taskset.add(tasks.initd.AddEC2InitScripts) + if manifest.provider.get('install_init_scripts', True): + taskset.add(tasks.initd.AddEC2InitScripts) - if manifest.volume['partitions']['type'] != 'none': 
- taskset.add(initd.AdjustExpandRootScript) + if manifest.volume['partitions']['type'] != 'none': + taskset.add(initd.AdjustExpandRootScript) - if manifest.system['bootloader'] == 'pvgrub': - taskset.add(grub.AddGrubPackage) - taskset.add(tasks.boot.ConfigurePVGrub) + if manifest.system['bootloader'] == 'pvgrub': + taskset.add(grub.AddGrubPackage) + taskset.add(tasks.boot.ConfigurePVGrub) - if manifest.volume['backing'].lower() == 'ebs': - taskset.update([tasks.host.GetInstanceMetadata, - tasks.ebs.Create, - tasks.ebs.Snapshot, - ]) - taskset.add(tasks.ebs.Attach) - taskset.discard(volume.Attach) + if manifest.volume['backing'].lower() == 'ebs': + taskset.update([tasks.host.GetInstanceMetadata, + tasks.ebs.Create, + tasks.ebs.Snapshot, + ]) + taskset.add(tasks.ebs.Attach) + taskset.discard(volume.Attach) - if manifest.volume['backing'].lower() == 's3': - taskset.update([loopback.AddRequiredCommands, - tasks.host.SetRegion, - loopback.Create, - tasks.filesystem.S3FStab, - tasks.ami.BundleImage, - tasks.ami.UploadImage, - tasks.ami.RemoveBundle, - ]) - taskset.discard(filesystem.FStab) + if manifest.volume['backing'].lower() == 's3': + taskset.update([loopback.AddRequiredCommands, + tasks.host.SetRegion, + loopback.Create, + tasks.filesystem.S3FStab, + tasks.ami.BundleImage, + tasks.ami.UploadImage, + tasks.ami.RemoveBundle, + ]) + taskset.discard(filesystem.FStab) - if manifest.provider.get('enhanced_networking', None) == 'simple': - taskset.update([kernel.AddDKMSPackages, - tasks.network.InstallEnhancedNetworking, - kernel.UpdateInitramfs]) + if manifest.provider.get('enhanced_networking', None) == 'simple': + taskset.update([kernel.AddDKMSPackages, + tasks.network.InstallEnhancedNetworking, + kernel.UpdateInitramfs]) - taskset.update([filesystem.Format, - volume.Delete, - ]) + taskset.update([filesystem.Format, + volume.Delete, + ]) def resolve_rollback_tasks(taskset, manifest, completed, counter_task): - 
taskset.update(task_groups.get_standard_rollback_tasks(completed)) - counter_task(taskset, tasks.ebs.Create, volume.Delete) - counter_task(taskset, tasks.ebs.Attach, volume.Detach) - counter_task(taskset, tasks.ami.BundleImage, tasks.ami.RemoveBundle) + taskset.update(task_groups.get_standard_rollback_tasks(completed)) + counter_task(taskset, tasks.ebs.Create, volume.Delete) + counter_task(taskset, tasks.ebs.Attach, volume.Detach) + counter_task(taskset, tasks.ami.BundleImage, tasks.ami.RemoveBundle) diff --git a/bootstrapvz/providers/ec2/ebsvolume.py b/bootstrapvz/providers/ec2/ebsvolume.py index d548f3e..049094e 100644 --- a/bootstrapvz/providers/ec2/ebsvolume.py +++ b/bootstrapvz/providers/ec2/ebsvolume.py @@ -5,54 +5,54 @@ import time class EBSVolume(Volume): - def create(self, conn, zone): - self.fsm.create(connection=conn, zone=zone) + def create(self, conn, zone): + self.fsm.create(connection=conn, zone=zone) - def _before_create(self, e): - conn = e.connection - zone = e.zone - size = self.size.bytes.get_qty_in('GiB') - self.volume = conn.create_volume(size, zone, volume_type='gp2') - while self.volume.volume_state() != 'available': - time.sleep(5) - self.volume.update() + def _before_create(self, e): + conn = e.connection + zone = e.zone + size = self.size.bytes.get_qty_in('GiB') + self.volume = conn.create_volume(size, zone, volume_type='gp2') + while self.volume.volume_state() != 'available': + time.sleep(5) + self.volume.update() - def attach(self, instance_id): - self.fsm.attach(instance_id=instance_id) + def attach(self, instance_id): + self.fsm.attach(instance_id=instance_id) - def _before_attach(self, e): - instance_id = e.instance_id - import os.path - import string - for letter in string.ascii_lowercase[5:]: - dev_path = os.path.join('/dev', 'xvd' + letter) - if not os.path.exists(dev_path): - self.device_path = dev_path - self.ec2_device_path = os.path.join('/dev', 'sd' + letter) - break + def _before_attach(self, e): + instance_id = 
e.instance_id + import os.path + import string + for letter in string.ascii_lowercase[5:]: + dev_path = os.path.join('/dev', 'xvd' + letter) + if not os.path.exists(dev_path): + self.device_path = dev_path + self.ec2_device_path = os.path.join('/dev', 'sd' + letter) + break - if self.device_path is None: - raise VolumeError('Unable to find a free block device path for mounting the bootstrap volume') + if self.device_path is None: + raise VolumeError('Unable to find a free block device path for mounting the bootstrap volume') - self.volume.attach(instance_id, self.ec2_device_path) - while self.volume.attachment_state() != 'attached': - time.sleep(2) - self.volume.update() + self.volume.attach(instance_id, self.ec2_device_path) + while self.volume.attachment_state() != 'attached': + time.sleep(2) + self.volume.update() - def _before_detach(self, e): - self.volume.detach() - while self.volume.attachment_state() is not None: - time.sleep(2) - self.volume.update() - del self.ec2_device_path - self.device_path = None + def _before_detach(self, e): + self.volume.detach() + while self.volume.attachment_state() is not None: + time.sleep(2) + self.volume.update() + del self.ec2_device_path + self.device_path = None - def _before_delete(self, e): - self.volume.delete() + def _before_delete(self, e): + self.volume.delete() - def snapshot(self): - snapshot = self.volume.create_snapshot() - while snapshot.status != 'completed': - time.sleep(2) - snapshot.update() - return snapshot + def snapshot(self): + snapshot = self.volume.create_snapshot() + while snapshot.status != 'completed': + time.sleep(2) + snapshot.update() + return snapshot diff --git a/bootstrapvz/providers/ec2/tasks/ami.py b/bootstrapvz/providers/ec2/tasks/ami.py index 103c5ff..be99b29 100644 --- a/bootstrapvz/providers/ec2/tasks/ami.py +++ b/bootstrapvz/providers/ec2/tasks/ami.py @@ -12,116 +12,116 @@ cert_ec2 = os.path.join(assets, 'certs/cert-ec2.pem') class AMIName(Task): - description = 'Determining the AMI 
name' - phase = phases.preparation - predecessors = [Connect] + description = 'Determining the AMI name' + phase = phases.preparation + predecessors = [Connect] - @classmethod - def run(cls, info): - ami_name = info.manifest.name.format(**info.manifest_vars) - ami_description = info.manifest.provider['description'].format(**info.manifest_vars) + @classmethod + def run(cls, info): + ami_name = info.manifest.name.format(**info.manifest_vars) + ami_description = info.manifest.provider['description'].format(**info.manifest_vars) - images = info._ec2['connection'].get_all_images(owners=['self']) - for image in images: - if ami_name == image.name: - msg = 'An image by the name {ami_name} already exists.'.format(ami_name=ami_name) - raise TaskError(msg) - info._ec2['ami_name'] = ami_name - info._ec2['ami_description'] = ami_description + images = info._ec2['connection'].get_all_images(owners=['self']) + for image in images: + if ami_name == image.name: + msg = 'An image by the name {ami_name} already exists.'.format(ami_name=ami_name) + raise TaskError(msg) + info._ec2['ami_name'] = ami_name + info._ec2['ami_description'] = ami_description class BundleImage(Task): - description = 'Bundling the image' - phase = phases.image_registration + description = 'Bundling the image' + phase = phases.image_registration - @classmethod - def run(cls, info): - bundle_name = 'bundle-' + info.run_id - info._ec2['bundle_path'] = os.path.join(info.workspace, bundle_name) - arch = {'i386': 'i386', 'amd64': 'x86_64'}.get(info.manifest.system['architecture']) - log_check_call(['euca-bundle-image', - '--image', info.volume.image_path, - '--arch', arch, - '--user', info.credentials['user-id'], - '--privatekey', info.credentials['private-key'], - '--cert', info.credentials['certificate'], - '--ec2cert', cert_ec2, - '--destination', info._ec2['bundle_path'], - '--prefix', info._ec2['ami_name']]) + @classmethod + def run(cls, info): + bundle_name = 'bundle-' + info.run_id + info._ec2['bundle_path'] 
= os.path.join(info.workspace, bundle_name) + arch = {'i386': 'i386', 'amd64': 'x86_64'}.get(info.manifest.system['architecture']) + log_check_call(['euca-bundle-image', + '--image', info.volume.image_path, + '--arch', arch, + '--user', info.credentials['user-id'], + '--privatekey', info.credentials['private-key'], + '--cert', info.credentials['certificate'], + '--ec2cert', cert_ec2, + '--destination', info._ec2['bundle_path'], + '--prefix', info._ec2['ami_name']]) class UploadImage(Task): - description = 'Uploading the image bundle' - phase = phases.image_registration - predecessors = [BundleImage] + description = 'Uploading the image bundle' + phase = phases.image_registration + predecessors = [BundleImage] - @classmethod - def run(cls, info): - manifest_file = os.path.join(info._ec2['bundle_path'], info._ec2['ami_name'] + '.manifest.xml') - if info._ec2['region'] == 'us-east-1': - s3_url = 'https://s3.amazonaws.com/' - elif info._ec2['region'] == 'cn-north-1': - s3_url = 'https://s3.cn-north-1.amazonaws.com.cn' - else: - s3_url = 'https://s3-{region}.amazonaws.com/'.format(region=info._ec2['region']) - info._ec2['manifest_location'] = info.manifest.provider['bucket'] + '/' + info._ec2['ami_name'] + '.manifest.xml' - log_check_call(['euca-upload-bundle', - '--bucket', info.manifest.provider['bucket'], - '--manifest', manifest_file, - '--access-key', info.credentials['access-key'], - '--secret-key', info.credentials['secret-key'], - '--url', s3_url, - '--region', info._ec2['region']]) + @classmethod + def run(cls, info): + manifest_file = os.path.join(info._ec2['bundle_path'], info._ec2['ami_name'] + '.manifest.xml') + if info._ec2['region'] == 'us-east-1': + s3_url = 'https://s3.amazonaws.com/' + elif info._ec2['region'] == 'cn-north-1': + s3_url = 'https://s3.cn-north-1.amazonaws.com.cn' + else: + s3_url = 'https://s3-{region}.amazonaws.com/'.format(region=info._ec2['region']) + info._ec2['manifest_location'] = info.manifest.provider['bucket'] + '/' + 
info._ec2['ami_name'] + '.manifest.xml' + log_check_call(['euca-upload-bundle', + '--bucket', info.manifest.provider['bucket'], + '--manifest', manifest_file, + '--access-key', info.credentials['access-key'], + '--secret-key', info.credentials['secret-key'], + '--url', s3_url, + '--region', info._ec2['region']]) class RemoveBundle(Task): - description = 'Removing the bundle files' - phase = phases.cleaning - successors = [workspace.DeleteWorkspace] + description = 'Removing the bundle files' + phase = phases.cleaning + successors = [workspace.DeleteWorkspace] - @classmethod - def run(cls, info): - from shutil import rmtree - rmtree(info._ec2['bundle_path']) - del info._ec2['bundle_path'] + @classmethod + def run(cls, info): + from shutil import rmtree + rmtree(info._ec2['bundle_path']) + del info._ec2['bundle_path'] class RegisterAMI(Task): - description = 'Registering the image as an AMI' - phase = phases.image_registration - predecessors = [Snapshot, UploadImage] + description = 'Registering the image as an AMI' + phase = phases.image_registration + predecessors = [Snapshot, UploadImage] - @classmethod - def run(cls, info): - registration_params = {'name': info._ec2['ami_name'], - 'description': info._ec2['ami_description']} - registration_params['architecture'] = {'i386': 'i386', - 'amd64': 'x86_64'}.get(info.manifest.system['architecture']) + @classmethod + def run(cls, info): + registration_params = {'name': info._ec2['ami_name'], + 'description': info._ec2['ami_description']} + registration_params['architecture'] = {'i386': 'i386', + 'amd64': 'x86_64'}.get(info.manifest.system['architecture']) - if info.manifest.volume['backing'] == 's3': - registration_params['image_location'] = info._ec2['manifest_location'] - else: - root_dev_name = {'pvm': '/dev/sda', - 'hvm': '/dev/xvda'}.get(info.manifest.provider['virtualization']) - registration_params['root_device_name'] = root_dev_name + if info.manifest.volume['backing'] == 's3': + 
registration_params['image_location'] = info._ec2['manifest_location'] + else: + root_dev_name = {'pvm': '/dev/sda', + 'hvm': '/dev/xvda'}.get(info.manifest.provider['virtualization']) + registration_params['root_device_name'] = root_dev_name - from boto.ec2.blockdevicemapping import BlockDeviceType - from boto.ec2.blockdevicemapping import BlockDeviceMapping - block_device = BlockDeviceType(snapshot_id=info._ec2['snapshot'].id, delete_on_termination=True, - size=info.volume.size.bytes.get_qty_in('GiB'), volume_type='gp2') - registration_params['block_device_map'] = BlockDeviceMapping() - registration_params['block_device_map'][root_dev_name] = block_device + from boto.ec2.blockdevicemapping import BlockDeviceType + from boto.ec2.blockdevicemapping import BlockDeviceMapping + block_device = BlockDeviceType(snapshot_id=info._ec2['snapshot'].id, delete_on_termination=True, + size=info.volume.size.bytes.get_qty_in('GiB'), volume_type='gp2') + registration_params['block_device_map'] = BlockDeviceMapping() + registration_params['block_device_map'][root_dev_name] = block_device - if info.manifest.provider['virtualization'] == 'hvm': - registration_params['virtualization_type'] = 'hvm' - else: - registration_params['virtualization_type'] = 'paravirtual' - akis_path = os.path.join(os.path.dirname(__file__), 'ami-akis.yml') - from bootstrapvz.common.tools import config_get - registration_params['kernel_id'] = config_get(akis_path, [info._ec2['region'], - info.manifest.system['architecture']]) + if info.manifest.provider['virtualization'] == 'hvm': + registration_params['virtualization_type'] = 'hvm' + else: + registration_params['virtualization_type'] = 'paravirtual' + akis_path = os.path.join(os.path.dirname(__file__), 'ami-akis.yml') + from bootstrapvz.common.tools import config_get + registration_params['kernel_id'] = config_get(akis_path, [info._ec2['region'], + info.manifest.system['architecture']]) - if info.manifest.provider.get('enhanced_networking', None) == 
'simple': - registration_params['sriov_net_support'] = 'simple' + if info.manifest.provider.get('enhanced_networking', None) == 'simple': + registration_params['sriov_net_support'] = 'simple' - info._ec2['image'] = info._ec2['connection'].register_image(**registration_params) + info._ec2['image'] = info._ec2['connection'].register_image(**registration_params) diff --git a/bootstrapvz/providers/ec2/tasks/boot.py b/bootstrapvz/providers/ec2/tasks/boot.py index 11b322f..7893b91 100644 --- a/bootstrapvz/providers/ec2/tasks/boot.py +++ b/bootstrapvz/providers/ec2/tasks/boot.py @@ -5,48 +5,48 @@ import os class ConfigurePVGrub(Task): - description = 'Creating grub config files for PVGrub' - phase = phases.system_modification + description = 'Creating grub config files for PVGrub' + phase = phases.system_modification - @classmethod - def run(cls, info): - import stat - rwxr_xr_x = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | - stat.S_IRGRP | stat.S_IXGRP | - stat.S_IROTH | stat.S_IXOTH) - x_all = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH + @classmethod + def run(cls, info): + import stat + rwxr_xr_x = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | + stat.S_IRGRP | stat.S_IXGRP | + stat.S_IROTH | stat.S_IXOTH) + x_all = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH - grubd = os.path.join(info.root, 'etc/grub.d') - for cfg in [os.path.join(grubd, f) for f in os.listdir(grubd)]: - os.chmod(cfg, os.stat(cfg).st_mode & ~ x_all) + grubd = os.path.join(info.root, 'etc/grub.d') + for cfg in [os.path.join(grubd, f) for f in os.listdir(grubd)]: + os.chmod(cfg, os.stat(cfg).st_mode & ~ x_all) - from shutil import copy - script_src = os.path.join(assets, 'grub.d/40_custom') - script_dst = os.path.join(info.root, 'etc/grub.d/40_custom') - copy(script_src, script_dst) - os.chmod(script_dst, rwxr_xr_x) + from shutil import copy + script_src = os.path.join(assets, 'grub.d/40_custom') + script_dst = os.path.join(info.root, 'etc/grub.d/40_custom') + copy(script_src, script_dst) + 
os.chmod(script_dst, rwxr_xr_x) - from bootstrapvz.base.fs.partitionmaps.none import NoPartitions - if not isinstance(info.volume.partition_map, NoPartitions): - from bootstrapvz.common.tools import sed_i - root_idx = info.volume.partition_map.root.get_index() - grub_device = 'GRUB_DEVICE=/dev/xvda' + str(root_idx) - sed_i(script_dst, '^GRUB_DEVICE=/dev/xvda$', grub_device) - grub_root = '\troot (hd0,{idx})'.format(idx=root_idx - 1) - sed_i(script_dst, '^\troot \(hd0\)$', grub_root) + from bootstrapvz.base.fs.partitionmaps.none import NoPartitions + if not isinstance(info.volume.partition_map, NoPartitions): + from bootstrapvz.common.tools import sed_i + root_idx = info.volume.partition_map.root.get_index() + grub_device = 'GRUB_DEVICE=/dev/xvda' + str(root_idx) + sed_i(script_dst, '^GRUB_DEVICE=/dev/xvda$', grub_device) + grub_root = '\troot (hd0,{idx})'.format(idx=root_idx - 1) + sed_i(script_dst, '^\troot \(hd0\)$', grub_root) - if info.manifest.volume['backing'] == 's3': - from bootstrapvz.common.tools import sed_i - sed_i(script_dst, '^GRUB_DEVICE=/dev/xvda$', 'GRUB_DEVICE=/dev/xvda1') + if info.manifest.volume['backing'] == 's3': + from bootstrapvz.common.tools import sed_i + sed_i(script_dst, '^GRUB_DEVICE=/dev/xvda$', 'GRUB_DEVICE=/dev/xvda1') - from bootstrapvz.common.tools import sed_i - grub_def = os.path.join(info.root, 'etc/default/grub') - sed_i(grub_def, '^GRUB_TIMEOUT=[0-9]+', 'GRUB_TIMEOUT=0\n' - 'GRUB_HIDDEN_TIMEOUT=true') - sed_i(grub_def, '^#GRUB_TERMINAL=console', 'GRUB_TERMINAL=console') - sed_i(grub_def, '^GRUB_CMDLINE_LINUX_DEFAULT=.*', 'GRUB_CMDLINE_LINUX_DEFAULT="consoleblank=0 console=hvc0 elevator=noop"') + from bootstrapvz.common.tools import sed_i + grub_def = os.path.join(info.root, 'etc/default/grub') + sed_i(grub_def, '^GRUB_TIMEOUT=[0-9]+', 'GRUB_TIMEOUT=0\n' + 'GRUB_HIDDEN_TIMEOUT=true') + sed_i(grub_def, '^#GRUB_TERMINAL=console', 'GRUB_TERMINAL=console') + sed_i(grub_def, '^GRUB_CMDLINE_LINUX_DEFAULT=.*', 
'GRUB_CMDLINE_LINUX_DEFAULT="consoleblank=0 console=hvc0 elevator=noop"') - from bootstrapvz.common.tools import log_check_call - log_check_call(['chroot', info.root, 'update-grub']) - log_check_call(['chroot', info.root, - 'ln', '--symbolic', '/boot/grub/grub.cfg', '/boot/grub/menu.lst']) + from bootstrapvz.common.tools import log_check_call + log_check_call(['chroot', info.root, 'update-grub']) + log_check_call(['chroot', info.root, + 'ln', '--symbolic', '/boot/grub/grub.cfg', '/boot/grub/menu.lst']) diff --git a/bootstrapvz/providers/ec2/tasks/connection.py b/bootstrapvz/providers/ec2/tasks/connection.py index 655d3a7..f597f72 100644 --- a/bootstrapvz/providers/ec2/tasks/connection.py +++ b/bootstrapvz/providers/ec2/tasks/connection.py @@ -4,73 +4,73 @@ import host class SilenceBotoDebug(Task): - description = 'Silence boto debug logging' - phase = phases.preparation + description = 'Silence boto debug logging' + phase = phases.preparation - @classmethod - def run(cls, info): - # Regardless of of loglevel, we don't want boto debug stuff, it's very noisy - import logging - logging.getLogger('boto').setLevel(logging.INFO) + @classmethod + def run(cls, info): + # Regardless of of loglevel, we don't want boto debug stuff, it's very noisy + import logging + logging.getLogger('boto').setLevel(logging.INFO) class GetCredentials(Task): - description = 'Getting AWS credentials' - phase = phases.preparation - successors = [SilenceBotoDebug] + description = 'Getting AWS credentials' + phase = phases.preparation + successors = [SilenceBotoDebug] - @classmethod - def run(cls, info): - keys = ['access-key', 'secret-key'] - if info.manifest.volume['backing'] == 's3': - keys.extend(['certificate', 'private-key', 'user-id']) - info.credentials = cls.get_credentials(info.manifest, keys) + @classmethod + def run(cls, info): + keys = ['access-key', 'secret-key'] + if info.manifest.volume['backing'] == 's3': + keys.extend(['certificate', 'private-key', 'user-id']) + info.credentials 
= cls.get_credentials(info.manifest, keys) - @classmethod - def get_credentials(cls, manifest, keys): - from os import getenv - creds = {} - if 'credentials' in manifest.provider: - if all(key in manifest.provider['credentials'] for key in keys): - for key in keys: - creds[key] = manifest.provider['credentials'][key] - return creds + @classmethod + def get_credentials(cls, manifest, keys): + from os import getenv + creds = {} + if 'credentials' in manifest.provider: + if all(key in manifest.provider['credentials'] for key in keys): + for key in keys: + creds[key] = manifest.provider['credentials'][key] + return creds - def env_key(key): - return ('aws-' + key).upper().replace('-', '_') - if all(getenv(env_key(key)) is not None for key in keys): - for key in keys: - creds[key] = getenv(env_key(key)) - return creds + def env_key(key): + return ('aws-' + key).upper().replace('-', '_') + if all(getenv(env_key(key)) is not None for key in keys): + for key in keys: + creds[key] = getenv(env_key(key)) + return creds - def provider_key(key): - return key.replace('-', '_') - import boto.provider - provider = boto.provider.Provider('aws') - if all(getattr(provider, provider_key(key)) is not None for key in keys): - for key in keys: - creds[key] = getattr(provider, provider_key(key)) - if hasattr(provider, 'security_token'): - creds['security-token'] = provider.security_token - return creds - raise RuntimeError(('No ec2 credentials found, they must all be specified ' - 'exclusively via environment variables or through the manifest.')) + def provider_key(key): + return key.replace('-', '_') + import boto.provider + provider = boto.provider.Provider('aws') + if all(getattr(provider, provider_key(key)) is not None for key in keys): + for key in keys: + creds[key] = getattr(provider, provider_key(key)) + if hasattr(provider, 'security_token'): + creds['security-token'] = provider.security_token + return creds + raise RuntimeError(('No ec2 credentials found, they must all be 
specified ' + 'exclusively via environment variables or through the manifest.')) class Connect(Task): - description = 'Connecting to EC2' - phase = phases.preparation - predecessors = [GetCredentials, host.GetInstanceMetadata, host.SetRegion] + description = 'Connecting to EC2' + phase = phases.preparation + predecessors = [GetCredentials, host.GetInstanceMetadata, host.SetRegion] - @classmethod - def run(cls, info): - from boto.ec2 import connect_to_region - connect_args = { - 'aws_access_key_id': info.credentials['access-key'], - 'aws_secret_access_key': info.credentials['secret-key'] - } + @classmethod + def run(cls, info): + from boto.ec2 import connect_to_region + connect_args = { + 'aws_access_key_id': info.credentials['access-key'], + 'aws_secret_access_key': info.credentials['secret-key'] + } - if 'security-token' in info.credentials: - connect_args['security_token'] = info.credentials['security-token'] + if 'security-token' in info.credentials: + connect_args['security_token'] = info.credentials['security-token'] - info._ec2['connection'] = connect_to_region(info._ec2['region'], **connect_args) + info._ec2['connection'] = connect_to_region(info._ec2['region'], **connect_args) diff --git a/bootstrapvz/providers/ec2/tasks/ebs.py b/bootstrapvz/providers/ec2/tasks/ebs.py index ff627d5..f50aa6b 100644 --- a/bootstrapvz/providers/ec2/tasks/ebs.py +++ b/bootstrapvz/providers/ec2/tasks/ebs.py @@ -3,28 +3,28 @@ from bootstrapvz.common import phases class Create(Task): - description = 'Creating the EBS volume' - phase = phases.volume_creation + description = 'Creating the EBS volume' + phase = phases.volume_creation - @classmethod - def run(cls, info): - info.volume.create(info._ec2['connection'], info._ec2['host']['availabilityZone']) + @classmethod + def run(cls, info): + info.volume.create(info._ec2['connection'], info._ec2['host']['availabilityZone']) class Attach(Task): - description = 'Attaching the volume' - phase = phases.volume_creation - predecessors = 
[Create] + description = 'Attaching the volume' + phase = phases.volume_creation + predecessors = [Create] - @classmethod - def run(cls, info): - info.volume.attach(info._ec2['host']['instanceId']) + @classmethod + def run(cls, info): + info.volume.attach(info._ec2['host']['instanceId']) class Snapshot(Task): - description = 'Creating a snapshot of the EBS volume' - phase = phases.image_registration + description = 'Creating a snapshot of the EBS volume' + phase = phases.image_registration - @classmethod - def run(cls, info): - info._ec2['snapshot'] = info.volume.snapshot() + @classmethod + def run(cls, info): + info._ec2['snapshot'] = info.volume.snapshot() diff --git a/bootstrapvz/providers/ec2/tasks/filesystem.py b/bootstrapvz/providers/ec2/tasks/filesystem.py index 2c0b19f..017a346 100644 --- a/bootstrapvz/providers/ec2/tasks/filesystem.py +++ b/bootstrapvz/providers/ec2/tasks/filesystem.py @@ -3,26 +3,26 @@ from bootstrapvz.common import phases class S3FStab(Task): - description = 'Adding the S3 root partition to the fstab' - phase = phases.system_modification + description = 'Adding the S3 root partition to the fstab' + phase = phases.system_modification - @classmethod - def run(cls, info): - import os.path - root = info.volume.partition_map.root + @classmethod + def run(cls, info): + import os.path + root = info.volume.partition_map.root - fstab_lines = [] - mount_opts = ['defaults'] - fstab_lines.append('{device_path}{idx} {mountpoint} {filesystem} {mount_opts} {dump} {pass_num}' - .format(device_path='/dev/xvda', - idx=1, - mountpoint='/', - filesystem=root.filesystem, - mount_opts=','.join(mount_opts), - dump='1', - pass_num='1')) + fstab_lines = [] + mount_opts = ['defaults'] + fstab_lines.append('{device_path}{idx} {mountpoint} {filesystem} {mount_opts} {dump} {pass_num}' + .format(device_path='/dev/xvda', + idx=1, + mountpoint='/', + filesystem=root.filesystem, + mount_opts=','.join(mount_opts), + dump='1', + pass_num='1')) - fstab_path = 
os.path.join(info.root, 'etc/fstab') - with open(fstab_path, 'w') as fstab: - fstab.write('\n'.join(fstab_lines)) - fstab.write('\n') + fstab_path = os.path.join(info.root, 'etc/fstab') + with open(fstab_path, 'w') as fstab: + fstab.write('\n'.join(fstab_lines)) + fstab.write('\n') diff --git a/bootstrapvz/providers/ec2/tasks/host.py b/bootstrapvz/providers/ec2/tasks/host.py index 508678e..bedb368 100644 --- a/bootstrapvz/providers/ec2/tasks/host.py +++ b/bootstrapvz/providers/ec2/tasks/host.py @@ -4,35 +4,35 @@ from bootstrapvz.common.tasks import host class AddExternalCommands(Task): - description = 'Determining required external commands for EC2 bootstrapping' - phase = phases.preparation - successors = [host.CheckExternalCommands] + description = 'Determining required external commands for EC2 bootstrapping' + phase = phases.preparation + successors = [host.CheckExternalCommands] - @classmethod - def run(cls, info): - if info.manifest.volume['backing'] == 's3': - info.host_dependencies['euca-bundle-image'] = 'euca2ools' - info.host_dependencies['euca-upload-bundle'] = 'euca2ools' + @classmethod + def run(cls, info): + if info.manifest.volume['backing'] == 's3': + info.host_dependencies['euca-bundle-image'] = 'euca2ools' + info.host_dependencies['euca-upload-bundle'] = 'euca2ools' class GetInstanceMetadata(Task): - description = 'Retrieving instance metadata' - phase = phases.preparation + description = 'Retrieving instance metadata' + phase = phases.preparation - @classmethod - def run(cls, info): - import urllib2 - import json - metadata_url = 'http://169.254.169.254/latest/dynamic/instance-identity/document' - response = urllib2.urlopen(url=metadata_url, timeout=5) - info._ec2['host'] = json.load(response) - info._ec2['region'] = info._ec2['host']['region'] + @classmethod + def run(cls, info): + import urllib2 + import json + metadata_url = 'http://169.254.169.254/latest/dynamic/instance-identity/document' + response = urllib2.urlopen(url=metadata_url, 
timeout=5) + info._ec2['host'] = json.load(response) + info._ec2['region'] = info._ec2['host']['region'] class SetRegion(Task): - description = 'Setting the AWS region' - phase = phases.preparation + description = 'Setting the AWS region' + phase = phases.preparation - @classmethod - def run(cls, info): - info._ec2['region'] = info.manifest.provider['region'] + @classmethod + def run(cls, info): + info._ec2['region'] = info.manifest.provider['region'] diff --git a/bootstrapvz/providers/ec2/tasks/initd.py b/bootstrapvz/providers/ec2/tasks/initd.py index 82a9902..12f6ae2 100644 --- a/bootstrapvz/providers/ec2/tasks/initd.py +++ b/bootstrapvz/providers/ec2/tasks/initd.py @@ -6,15 +6,15 @@ import os.path class AddEC2InitScripts(Task): - description = 'Adding EC2 startup scripts' - phase = phases.system_modification - successors = [initd.InstallInitScripts] + description = 'Adding EC2 startup scripts' + phase = phases.system_modification + successors = [initd.InstallInitScripts] - @classmethod - def run(cls, info): - init_scripts = {'ec2-get-credentials': 'ec2-get-credentials', - 'ec2-run-user-data': 'ec2-run-user-data'} + @classmethod + def run(cls, info): + init_scripts = {'ec2-get-credentials': 'ec2-get-credentials', + 'ec2-run-user-data': 'ec2-run-user-data'} - init_scripts_dir = os.path.join(assets, 'init.d') - for name, path in init_scripts.iteritems(): - info.initd['install'][name] = os.path.join(init_scripts_dir, path) + init_scripts_dir = os.path.join(assets, 'init.d') + for name, path in init_scripts.iteritems(): + info.initd['install'][name] = os.path.join(init_scripts_dir, path) diff --git a/bootstrapvz/providers/ec2/tasks/network.py b/bootstrapvz/providers/ec2/tasks/network.py index 91749b4..5c971a6 100644 --- a/bootstrapvz/providers/ec2/tasks/network.py +++ b/bootstrapvz/providers/ec2/tasks/network.py @@ -5,104 +5,104 @@ import os.path class InstallDHCPCD(Task): - description = 'Replacing isc-dhcp with dhcpcd' - phase = phases.preparation + description = 
'Replacing isc-dhcp with dhcpcd' + phase = phases.preparation - @classmethod - def run(cls, info): - # isc-dhcp-client before jessie doesn't work properly with ec2 - info.packages.add('dhcpcd') - info.exclude_packages.add('isc-dhcp-client') - info.exclude_packages.add('isc-dhcp-common') + @classmethod + def run(cls, info): + # isc-dhcp-client before jessie doesn't work properly with ec2 + info.packages.add('dhcpcd') + info.exclude_packages.add('isc-dhcp-client') + info.exclude_packages.add('isc-dhcp-common') class EnableDHCPCDDNS(Task): - description = 'Configuring the DHCP client to set the nameservers' - phase = phases.system_modification + description = 'Configuring the DHCP client to set the nameservers' + phase = phases.system_modification - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import sed_i - dhcpcd = os.path.join(info.root, 'etc/default/dhcpcd') - sed_i(dhcpcd, '^#*SET_DNS=.*', 'SET_DNS=\'yes\'') + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import sed_i + dhcpcd = os.path.join(info.root, 'etc/default/dhcpcd') + sed_i(dhcpcd, '^#*SET_DNS=.*', 'SET_DNS=\'yes\'') class AddBuildEssentialPackage(Task): - description = 'Adding build-essential package' - phase = phases.preparation + description = 'Adding build-essential package' + phase = phases.preparation - @classmethod - def run(cls, info): - info.packages.add('build-essential') + @classmethod + def run(cls, info): + info.packages.add('build-essential') class InstallNetworkingUDevHotplugAndDHCPSubinterface(Task): - description = 'Setting up udev and DHCPD rules for EC2 networking' - phase = phases.system_modification + description = 'Setting up udev and DHCPD rules for EC2 networking' + phase = phases.system_modification - @classmethod - def run(cls, info): - from . import assets - script_src = os.path.join(assets, 'ec2') - script_dst = os.path.join(info.root, 'etc') + @classmethod + def run(cls, info): + from . 
import assets + script_src = os.path.join(assets, 'ec2') + script_dst = os.path.join(info.root, 'etc') - import stat - rwxr_xr_x = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | - stat.S_IRGRP | stat.S_IXGRP | - stat.S_IROTH | stat.S_IXOTH) + import stat + rwxr_xr_x = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | + stat.S_IRGRP | stat.S_IXGRP | + stat.S_IROTH | stat.S_IXOTH) - from shutil import copy - copy(os.path.join(script_src, '53-ec2-network-interfaces.rules'), - os.path.join(script_dst, 'udev/rules.d/53-ec2-network-interfaces.rules')) - os.chmod(os.path.join(script_dst, 'udev/rules.d/53-ec2-network-interfaces.rules'), rwxr_xr_x) + from shutil import copy + copy(os.path.join(script_src, '53-ec2-network-interfaces.rules'), + os.path.join(script_dst, 'udev/rules.d/53-ec2-network-interfaces.rules')) + os.chmod(os.path.join(script_dst, 'udev/rules.d/53-ec2-network-interfaces.rules'), rwxr_xr_x) - os.mkdir(os.path.join(script_dst, 'sysconfig'), 0755) - os.mkdir(os.path.join(script_dst, 'sysconfig/network-scripts'), 0755) - copy(os.path.join(script_src, 'ec2net.hotplug'), - os.path.join(script_dst, 'sysconfig/network-scripts/ec2net.hotplug')) - os.chmod(os.path.join(script_dst, 'sysconfig/network-scripts/ec2net.hotplug'), rwxr_xr_x) + os.mkdir(os.path.join(script_dst, 'sysconfig'), 0755) + os.mkdir(os.path.join(script_dst, 'sysconfig/network-scripts'), 0755) + copy(os.path.join(script_src, 'ec2net.hotplug'), + os.path.join(script_dst, 'sysconfig/network-scripts/ec2net.hotplug')) + os.chmod(os.path.join(script_dst, 'sysconfig/network-scripts/ec2net.hotplug'), rwxr_xr_x) - copy(os.path.join(script_src, 'ec2net-functions'), - os.path.join(script_dst, 'sysconfig/network-scripts/ec2net-functions')) - os.chmod(os.path.join(script_dst, 'sysconfig/network-scripts/ec2net-functions'), rwxr_xr_x) + copy(os.path.join(script_src, 'ec2net-functions'), + os.path.join(script_dst, 'sysconfig/network-scripts/ec2net-functions')) + os.chmod(os.path.join(script_dst, 
'sysconfig/network-scripts/ec2net-functions'), rwxr_xr_x) - copy(os.path.join(script_src, 'ec2dhcp.sh'), - os.path.join(script_dst, 'dhcp/dhclient-exit-hooks.d/ec2dhcp.sh')) - os.chmod(os.path.join(script_dst, 'dhcp/dhclient-exit-hooks.d/ec2dhcp.sh'), rwxr_xr_x) + copy(os.path.join(script_src, 'ec2dhcp.sh'), + os.path.join(script_dst, 'dhcp/dhclient-exit-hooks.d/ec2dhcp.sh')) + os.chmod(os.path.join(script_dst, 'dhcp/dhclient-exit-hooks.d/ec2dhcp.sh'), rwxr_xr_x) - with open(os.path.join(script_dst, 'network/interfaces'), "a") as interfaces: - interfaces.write("iface eth1 inet dhcp\n") - interfaces.write("iface eth2 inet dhcp\n") - interfaces.write("iface eth3 inet dhcp\n") - interfaces.write("iface eth4 inet dhcp\n") - interfaces.write("iface eth5 inet dhcp\n") - interfaces.write("iface eth6 inet dhcp\n") - interfaces.write("iface eth7 inet dhcp\n") + with open(os.path.join(script_dst, 'network/interfaces'), "a") as interfaces: + interfaces.write("iface eth1 inet dhcp\n") + interfaces.write("iface eth2 inet dhcp\n") + interfaces.write("iface eth3 inet dhcp\n") + interfaces.write("iface eth4 inet dhcp\n") + interfaces.write("iface eth5 inet dhcp\n") + interfaces.write("iface eth6 inet dhcp\n") + interfaces.write("iface eth7 inet dhcp\n") class InstallEnhancedNetworking(Task): - description = 'Installing enhanced networking kernel driver using DKMS' - phase = phases.system_modification - successors = [kernel.UpdateInitramfs] + description = 'Installing enhanced networking kernel driver using DKMS' + phase = phases.system_modification + successors = [kernel.UpdateInitramfs] - @classmethod - def run(cls, info): - version = '2.16.1' - drivers_url = 'http://downloads.sourceforge.net/project/e1000/ixgbevf stable/%s/ixgbevf-%s.tar.gz' % (version, version) - archive = os.path.join(info.root, 'tmp', 'ixgbevf-%s.tar.gz' % (version)) - module_path = os.path.join(info.root, 'usr', 'src', 'ixgbevf-%s' % (version)) + @classmethod + def run(cls, info): + version = '2.16.1' + 
drivers_url = 'http://downloads.sourceforge.net/project/e1000/ixgbevf stable/%s/ixgbevf-%s.tar.gz' % (version, version) + archive = os.path.join(info.root, 'tmp', 'ixgbevf-%s.tar.gz' % (version)) + module_path = os.path.join(info.root, 'usr', 'src', 'ixgbevf-%s' % (version)) - import urllib - urllib.urlretrieve(drivers_url, archive) + import urllib + urllib.urlretrieve(drivers_url, archive) - from bootstrapvz.common.tools import log_check_call - log_check_call(['tar', '--ungzip', - '--extract', - '--file', archive, - '--directory', os.path.join(info.root, 'usr', 'src')]) + from bootstrapvz.common.tools import log_check_call + log_check_call(['tar', '--ungzip', + '--extract', + '--file', archive, + '--directory', os.path.join(info.root, 'usr', 'src')]) - with open(os.path.join(module_path, 'dkms.conf'), 'w') as dkms_conf: - dkms_conf.write("""PACKAGE_NAME="ixgbevf" + with open(os.path.join(module_path, 'dkms.conf'), 'w') as dkms_conf: + dkms_conf.write("""PACKAGE_NAME="ixgbevf" PACKAGE_VERSION="%s" CLEAN="cd src/; make clean" MAKE="cd src/; make BUILD_KERNEL=${kernelver}" @@ -113,7 +113,7 @@ DEST_MODULE_NAME[0]="ixgbevf" AUTOINSTALL="yes" """ % (version)) - for task in ['add', 'build', 'install']: - # Invoke DKMS task using specified kernel module (-m) and version (-v) - log_check_call(['chroot', info.root, - 'dkms', task, '-m', 'ixgbevf', '-v', version, '-k', info.kernel_version]) + for task in ['add', 'build', 'install']: + # Invoke DKMS task using specified kernel module (-m) and version (-v) + log_check_call(['chroot', info.root, + 'dkms', task, '-m', 'ixgbevf', '-v', version, '-k', info.kernel_version]) diff --git a/bootstrapvz/providers/ec2/tasks/packages.py b/bootstrapvz/providers/ec2/tasks/packages.py index 4d51280..8aabb4e 100644 --- a/bootstrapvz/providers/ec2/tasks/packages.py +++ b/bootstrapvz/providers/ec2/tasks/packages.py @@ -4,28 +4,28 @@ import os.path class DefaultPackages(Task): - description = 'Adding image packages required for EC2' - phase = 
phases.preparation + description = 'Adding image packages required for EC2' + phase = phases.preparation - @classmethod - def run(cls, info): - info.packages.add('file') # Needed for the init scripts + @classmethod + def run(cls, info): + info.packages.add('file') # Needed for the init scripts - kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.yml') - from bootstrapvz.common.tools import config_get - kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, - info.manifest.system['architecture']]) - info.packages.add(kernel_package) + kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.yml') + from bootstrapvz.common.tools import config_get + kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, + info.manifest.system['architecture']]) + info.packages.add(kernel_package) class AddWorkaroundGrowpart(Task): - description = 'Adding growpart workaround for jessie' - phase = phases.system_modification + description = 'Adding growpart workaround for jessie' + phase = phases.system_modification - @classmethod - def run(cls, info): - from shutil import copy - from . import assets - src = os.path.join(assets, 'bin/growpart') - dst = os.path.join(info.root, 'usr/bin/growpart-workaround') - copy(src, dst) + @classmethod + def run(cls, info): + from shutil import copy + from . 
import assets + src = os.path.join(assets, 'bin/growpart') + dst = os.path.join(info.root, 'usr/bin/growpart-workaround') + copy(src, dst) diff --git a/bootstrapvz/providers/gce/__init__.py b/bootstrapvz/providers/gce/__init__.py index d775452..8cc3832 100644 --- a/bootstrapvz/providers/gce/__init__.py +++ b/bootstrapvz/providers/gce/__init__.py @@ -15,40 +15,40 @@ from bootstrapvz.common.tasks import volume def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - taskset.update(task_groups.get_standard_groups(manifest)) + taskset.update(task_groups.get_standard_groups(manifest)) - taskset.update([apt.AddBackports, - loopback.AddRequiredCommands, - loopback.Create, - tasks.packages.DefaultPackages, - tasks.configuration.GatherReleaseInformation, - tasks.host.DisableIPv6, - tasks.boot.ConfigureGrub, - initd.AddExpandRoot, - initd.AdjustExpandRootScript, - tasks.initd.AdjustExpandRootDev, - initd.InstallInitScripts, - boot.BlackListModules, - boot.UpdateInitramfs, - ssh.AddSSHKeyGeneration, - ssh.DisableSSHPasswordAuthentication, - ssh.DisableRootLogin, - image.MoveImage, - tasks.image.CreateTarball, - volume.Delete, - ]) + taskset.update([apt.AddBackports, + loopback.AddRequiredCommands, + loopback.Create, + tasks.packages.DefaultPackages, + tasks.configuration.GatherReleaseInformation, + tasks.host.DisableIPv6, + tasks.boot.ConfigureGrub, + initd.AddExpandRoot, + initd.AdjustExpandRootScript, + tasks.initd.AdjustExpandRootDev, + initd.InstallInitScripts, + boot.BlackListModules, + boot.UpdateInitramfs, + ssh.AddSSHKeyGeneration, + ssh.DisableSSHPasswordAuthentication, + ssh.DisableRootLogin, + image.MoveImage, + tasks.image.CreateTarball, + 
volume.Delete, + ]) - if 'gcs_destination' in manifest.provider: - taskset.add(tasks.image.UploadImage) - if 'gce_project' in manifest.provider: - taskset.add(tasks.image.RegisterImage) + if 'gcs_destination' in manifest.provider: + taskset.add(tasks.image.UploadImage) + if 'gce_project' in manifest.provider: + taskset.add(tasks.image.RegisterImage) def resolve_rollback_tasks(taskset, manifest, completed, counter_task): - taskset.update(task_groups.get_standard_rollback_tasks(completed)) + taskset.update(task_groups.get_standard_rollback_tasks(completed)) diff --git a/bootstrapvz/providers/gce/tasks/boot.py b/bootstrapvz/providers/gce/tasks/boot.py index 95653a9..75657c1 100644 --- a/bootstrapvz/providers/gce/tasks/boot.py +++ b/bootstrapvz/providers/gce/tasks/boot.py @@ -5,13 +5,13 @@ import os.path class ConfigureGrub(Task): - description = 'Change grub configuration to allow for ttyS0 output' - phase = phases.system_modification - successors = [grub.InstallGrub_1_99, grub.InstallGrub_2] + description = 'Change grub configuration to allow for ttyS0 output' + phase = phases.system_modification + successors = [grub.InstallGrub_1_99, grub.InstallGrub_2] - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import sed_i - grub_config = os.path.join(info.root, 'etc/default/grub') - sed_i(grub_config, r'^(GRUB_CMDLINE_LINUX*=".*)"\s*$', r'\1console=ttyS0,38400n8 elevator=noop"') - sed_i(grub_config, r'^.*(GRUB_TIMEOUT=).*$', r'GRUB_TIMEOUT=0') + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import sed_i + grub_config = os.path.join(info.root, 'etc/default/grub') + sed_i(grub_config, r'^(GRUB_CMDLINE_LINUX*=".*)"\s*$', r'\1console=ttyS0,38400n8 elevator=noop"') + sed_i(grub_config, r'^.*(GRUB_TIMEOUT=).*$', r'GRUB_TIMEOUT=0') diff --git a/bootstrapvz/providers/gce/tasks/configuration.py b/bootstrapvz/providers/gce/tasks/configuration.py index fbb92f5..e14f4c3 100644 --- a/bootstrapvz/providers/gce/tasks/configuration.py +++ 
b/bootstrapvz/providers/gce/tasks/configuration.py @@ -4,14 +4,14 @@ from bootstrapvz.common.tools import log_check_call class GatherReleaseInformation(Task): - description = 'Gathering release information about created image' - phase = phases.system_modification + description = 'Gathering release information about created image' + phase = phases.system_modification - @classmethod - def run(cls, info): - lsb_distribution = log_check_call(['chroot', info.root, 'lsb_release', '-i', '-s']) - lsb_description = log_check_call(['chroot', info.root, 'lsb_release', '-d', '-s']) - lsb_release = log_check_call(['chroot', info.root, 'lsb_release', '-r', '-s']) - info._gce['lsb_distribution'] = lsb_distribution[0] - info._gce['lsb_description'] = lsb_description[0] - info._gce['lsb_release'] = lsb_release[0] + @classmethod + def run(cls, info): + lsb_distribution = log_check_call(['chroot', info.root, 'lsb_release', '-i', '-s']) + lsb_description = log_check_call(['chroot', info.root, 'lsb_release', '-d', '-s']) + lsb_release = log_check_call(['chroot', info.root, 'lsb_release', '-r', '-s']) + info._gce['lsb_distribution'] = lsb_distribution[0] + info._gce['lsb_description'] = lsb_description[0] + info._gce['lsb_release'] = lsb_release[0] diff --git a/bootstrapvz/providers/gce/tasks/host.py b/bootstrapvz/providers/gce/tasks/host.py index 55b8387..9dd3a7a 100644 --- a/bootstrapvz/providers/gce/tasks/host.py +++ b/bootstrapvz/providers/gce/tasks/host.py @@ -5,13 +5,13 @@ import os.path class DisableIPv6(Task): - description = "Disabling IPv6 support" - phase = phases.system_modification - predecessors = [network.ConfigureNetworkIF] + description = "Disabling IPv6 support" + phase = phases.system_modification + predecessors = [network.ConfigureNetworkIF] - @classmethod - def run(cls, info): - network_configuration_path = os.path.join(info.root, 'etc/sysctl.d/70-disable-ipv6.conf') - with open(network_configuration_path, 'w') as config_file: - print >>config_file, 
"net.ipv6.conf.all.disable_ipv6 = 1" - print >>config_file, "net.ipv6.conf.lo.disable_ipv6 = 0" + @classmethod + def run(cls, info): + network_configuration_path = os.path.join(info.root, 'etc/sysctl.d/70-disable-ipv6.conf') + with open(network_configuration_path, 'w') as config_file: + print >>config_file, "net.ipv6.conf.all.disable_ipv6 = 1" + print >>config_file, "net.ipv6.conf.lo.disable_ipv6 = 0" diff --git a/bootstrapvz/providers/gce/tasks/image.py b/bootstrapvz/providers/gce/tasks/image.py index a6c53fc..5d07ff0 100644 --- a/bootstrapvz/providers/gce/tasks/image.py +++ b/bootstrapvz/providers/gce/tasks/image.py @@ -6,52 +6,52 @@ import os.path class CreateTarball(Task): - description = 'Creating tarball with image' - phase = phases.image_registration - predecessors = [image.MoveImage] + description = 'Creating tarball with image' + phase = phases.image_registration + predecessors = [image.MoveImage] - @classmethod - def run(cls, info): - image_name = info.manifest.name.format(**info.manifest_vars) - filename = image_name + '.' + info.volume.extension - # ensure that we do not use disallowed characters in image name - image_name = image_name.lower() - image_name = image_name.replace(".", "-") - info._gce['image_name'] = image_name - tarball_name = image_name + '.tar.gz' - tarball_path = os.path.join(info.manifest.bootstrapper['workspace'], tarball_name) - info._gce['tarball_name'] = tarball_name - info._gce['tarball_path'] = tarball_path - # GCE requires that the file in the tar be named disk.raw, hence the transform - log_check_call(['tar', '--sparse', '-C', info.manifest.bootstrapper['workspace'], - '-caf', tarball_path, - '--transform=s|.*|disk.raw|', - filename]) + @classmethod + def run(cls, info): + image_name = info.manifest.name.format(**info.manifest_vars) + filename = image_name + '.' 
+ info.volume.extension + # ensure that we do not use disallowed characters in image name + image_name = image_name.lower() + image_name = image_name.replace(".", "-") + info._gce['image_name'] = image_name + tarball_name = image_name + '.tar.gz' + tarball_path = os.path.join(info.manifest.bootstrapper['workspace'], tarball_name) + info._gce['tarball_name'] = tarball_name + info._gce['tarball_path'] = tarball_path + # GCE requires that the file in the tar be named disk.raw, hence the transform + log_check_call(['tar', '--sparse', '-C', info.manifest.bootstrapper['workspace'], + '-caf', tarball_path, + '--transform=s|.*|disk.raw|', + filename]) class UploadImage(Task): - description = 'Uploading image to GCS' - phase = phases.image_registration - predecessors = [CreateTarball] + description = 'Uploading image to GCS' + phase = phases.image_registration + predecessors = [CreateTarball] - @classmethod - def run(cls, info): - log_check_call(['gsutil', 'cp', info._gce['tarball_path'], - info.manifest.provider['gcs_destination'] + info._gce['tarball_name']]) + @classmethod + def run(cls, info): + log_check_call(['gsutil', 'cp', info._gce['tarball_path'], + info.manifest.provider['gcs_destination'] + info._gce['tarball_name']]) class RegisterImage(Task): - description = 'Registering image with GCE' - phase = phases.image_registration - predecessors = [UploadImage] + description = 'Registering image with GCE' + phase = phases.image_registration + predecessors = [UploadImage] - @classmethod - def run(cls, info): - image_description = info._gce['lsb_description'] - if 'description' in info.manifest.provider: - image_description = info.manifest.provider['description'] - image_description = image_description.format(**info.manifest_vars) - log_check_call(['gcloud', 'compute', '--project=' + info.manifest.provider['gce_project'], - 'images', 'create', info._gce['image_name'], - '--source-uri=' + info.manifest.provider['gcs_destination'] + info._gce['tarball_name'], - 
'--description=' + image_description]) + @classmethod + def run(cls, info): + image_description = info._gce['lsb_description'] + if 'description' in info.manifest.provider: + image_description = info.manifest.provider['description'] + image_description = image_description.format(**info.manifest_vars) + log_check_call(['gcloud', 'compute', '--project=' + info.manifest.provider['gce_project'], + 'images', 'create', info._gce['image_name'], + '--source-uri=' + info.manifest.provider['gcs_destination'] + info._gce['tarball_name'], + '--description=' + image_description]) diff --git a/bootstrapvz/providers/gce/tasks/initd.py b/bootstrapvz/providers/gce/tasks/initd.py index 1f3ed7c..116e9b1 100644 --- a/bootstrapvz/providers/gce/tasks/initd.py +++ b/bootstrapvz/providers/gce/tasks/initd.py @@ -5,12 +5,12 @@ import os.path class AdjustExpandRootDev(Task): - description = 'Adjusting the expand-root device' - phase = phases.system_modification - predecessors = [initd.AddExpandRoot, initd.AdjustExpandRootScript] + description = 'Adjusting the expand-root device' + phase = phases.system_modification + predecessors = [initd.AddExpandRoot, initd.AdjustExpandRootScript] - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import sed_i - script = os.path.join(info.root, 'etc/init.d/expand-root') - sed_i(script, '/dev/loop0', '/dev/sda') + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import sed_i + script = os.path.join(info.root, 'etc/init.d/expand-root') + sed_i(script, '/dev/loop0', '/dev/sda') diff --git a/bootstrapvz/providers/gce/tasks/packages.py b/bootstrapvz/providers/gce/tasks/packages.py index 4326c86..1d4907a 100644 --- a/bootstrapvz/providers/gce/tasks/packages.py +++ b/bootstrapvz/providers/gce/tasks/packages.py @@ -6,30 +6,30 @@ import os class DefaultPackages(Task): - description = 'Adding image packages required for GCE' - phase = phases.preparation - successors = [packages.AddManifestPackages] + description = 'Adding image 
packages required for GCE' + phase = phases.preparation + successors = [packages.AddManifestPackages] - @classmethod - def run(cls, info): - info.packages.add('acpi-support-base') - info.packages.add('busybox') - info.packages.add('ca-certificates') - info.packages.add('curl') - info.packages.add('ethtool') - info.packages.add('gdisk') - info.packages.add('kpartx') - info.packages.add('isc-dhcp-client') - info.packages.add('lsb-release') - info.packages.add('ntp') - info.packages.add('parted') - info.packages.add('python') - info.packages.add('openssh-client') - info.packages.add('openssh-server') - info.packages.add('sudo') - info.packages.add('uuid-runtime') + @classmethod + def run(cls, info): + info.packages.add('acpi-support-base') + info.packages.add('busybox') + info.packages.add('ca-certificates') + info.packages.add('curl') + info.packages.add('ethtool') + info.packages.add('gdisk') + info.packages.add('kpartx') + info.packages.add('isc-dhcp-client') + info.packages.add('lsb-release') + info.packages.add('ntp') + info.packages.add('parted') + info.packages.add('python') + info.packages.add('openssh-client') + info.packages.add('openssh-server') + info.packages.add('sudo') + info.packages.add('uuid-runtime') - kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.yml') - kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, - info.manifest.system['architecture']]) - info.packages.add(kernel_package) + kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.yml') + kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, + info.manifest.system['architecture']]) + info.packages.add(kernel_package) diff --git a/bootstrapvz/providers/kvm/__init__.py b/bootstrapvz/providers/kvm/__init__.py index 415e42d..b056676 100644 --- a/bootstrapvz/providers/kvm/__init__.py +++ b/bootstrapvz/providers/kvm/__init__.py @@ -7,28 +7,28 @@ from bootstrapvz.common.tasks 
import ssh def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - taskset.update(task_groups.get_standard_groups(manifest)) + taskset.update(task_groups.get_standard_groups(manifest)) - taskset.update([tasks.packages.DefaultPackages, - loopback.AddRequiredCommands, - loopback.Create, - initd.InstallInitScripts, - ssh.AddOpenSSHPackage, - ssh.ShredHostkeys, - ssh.AddSSHKeyGeneration, - image.MoveImage, - ]) + taskset.update([tasks.packages.DefaultPackages, + loopback.AddRequiredCommands, + loopback.Create, + initd.InstallInitScripts, + ssh.AddOpenSSHPackage, + ssh.ShredHostkeys, + ssh.AddSSHKeyGeneration, + image.MoveImage, + ]) - if manifest.provider.get('virtio', []): - from tasks import virtio - taskset.update([virtio.VirtIO]) + if manifest.provider.get('virtio', []): + from tasks import virtio + taskset.update([virtio.VirtIO]) def resolve_rollback_tasks(taskset, manifest, completed, counter_task): - taskset.update(task_groups.get_standard_rollback_tasks(completed)) + taskset.update(task_groups.get_standard_rollback_tasks(completed)) diff --git a/bootstrapvz/providers/kvm/tasks/packages.py b/bootstrapvz/providers/kvm/tasks/packages.py index 9fe6bbe..1dd20ec 100644 --- a/bootstrapvz/providers/kvm/tasks/packages.py +++ b/bootstrapvz/providers/kvm/tasks/packages.py @@ -3,14 +3,14 @@ from bootstrapvz.common import phases class DefaultPackages(Task): - description = 'Adding image packages required for kvm' - phase = phases.preparation + description = 'Adding image packages required for kvm' + phase = phases.preparation - @classmethod - def run(cls, info): - import os.path - kernel_packages_path = os.path.join(os.path.dirname(__file__), 
'packages-kernels.yml') - from bootstrapvz.common.tools import config_get - kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, - info.manifest.system['architecture']]) - info.packages.add(kernel_package) + @classmethod + def run(cls, info): + import os.path + kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.yml') + from bootstrapvz.common.tools import config_get + kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, + info.manifest.system['architecture']]) + info.packages.add(kernel_package) diff --git a/bootstrapvz/providers/kvm/tasks/virtio.py b/bootstrapvz/providers/kvm/tasks/virtio.py index 402ba1a..91e7913 100644 --- a/bootstrapvz/providers/kvm/tasks/virtio.py +++ b/bootstrapvz/providers/kvm/tasks/virtio.py @@ -4,13 +4,13 @@ import os class VirtIO(Task): - description = 'Install virtio modules' - phase = phases.system_modification + description = 'Install virtio modules' + phase = phases.system_modification - @classmethod - def run(cls, info): - modules = os.path.join(info.root, '/etc/initramfs-tools/modules') - with open(modules, "a") as modules_file: - modules_file.write("\n") - for module in info.manifest.provider.get('virtio', []): - modules_file.write(module + "\n") + @classmethod + def run(cls, info): + modules = os.path.join(info.root, '/etc/initramfs-tools/modules') + with open(modules, "a") as modules_file: + modules_file.write("\n") + for module in info.manifest.provider.get('virtio', []): + modules_file.write(module + "\n") diff --git a/bootstrapvz/providers/oracle/__init__.py b/bootstrapvz/providers/oracle/__init__.py index 7725028..dba4da0 100644 --- a/bootstrapvz/providers/oracle/__init__.py +++ b/bootstrapvz/providers/oracle/__init__.py @@ -10,40 +10,40 @@ import tasks.packages def validate_manifest(data, validator, error): - import os.path - validator(data, os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + import os.path + 
validator(data, os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - keys = ['username', 'password', 'identity-domain'] - if 'credentials' in data['provider']: - if not all(key in data['provider']['credentials'] for key in keys): - msg = 'All Oracle Compute Cloud credentials should be specified in the manifest' - error(msg, ['provider', 'credentials']) - if not data['provider'].get('container'): - msg = 'The container to which the image will be uploaded should be specified' - error(msg, ['provider']) + keys = ['username', 'password', 'identity-domain'] + if 'credentials' in data['provider']: + if not all(key in data['provider']['credentials'] for key in keys): + msg = 'All Oracle Compute Cloud credentials should be specified in the manifest' + error(msg, ['provider', 'credentials']) + if not data['provider'].get('container'): + msg = 'The container to which the image will be uploaded should be specified' + error(msg, ['provider']) def resolve_tasks(taskset, manifest): - taskset.update(task_groups.get_standard_groups(manifest)) - taskset.update(task_groups.ssh_group) + taskset.update(task_groups.get_standard_groups(manifest)) + taskset.update(task_groups.ssh_group) - taskset.update([loopback.AddRequiredCommands, - loopback.Create, - image.MoveImage, - ssh.DisableRootLogin, - volume.Delete, - tasks.image.CreateImageTarball, - tasks.network.InstallDHCPCD, - tasks.packages.DefaultPackages, - ]) + taskset.update([loopback.AddRequiredCommands, + loopback.Create, + image.MoveImage, + ssh.DisableRootLogin, + volume.Delete, + tasks.image.CreateImageTarball, + tasks.network.InstallDHCPCD, + tasks.packages.DefaultPackages, + ]) - if 'credentials' in manifest.provider: - taskset.add(tasks.api.Connect) - taskset.add(tasks.image.UploadImageTarball) - if manifest.provider.get('verify', False): - taskset.add(tasks.image.DownloadImageTarball) - taskset.add(tasks.image.CompareImageTarballs) + if 'credentials' in manifest.provider: + taskset.add(tasks.api.Connect) + 
taskset.add(tasks.image.UploadImageTarball) + if manifest.provider.get('verify', False): + taskset.add(tasks.image.DownloadImageTarball) + taskset.add(tasks.image.CompareImageTarballs) def resolve_rollback_tasks(taskset, manifest, completed, counter_task): - taskset.update(task_groups.get_standard_rollback_tasks(completed)) + taskset.update(task_groups.get_standard_rollback_tasks(completed)) diff --git a/bootstrapvz/providers/oracle/apiclient.py b/bootstrapvz/providers/oracle/apiclient.py index fa0f4bc..d162bd4 100644 --- a/bootstrapvz/providers/oracle/apiclient.py +++ b/bootstrapvz/providers/oracle/apiclient.py @@ -6,128 +6,128 @@ from bootstrapvz.common.bytes import Bytes class OracleStorageAPIClient: - def __init__(self, username, password, identity_domain, container): - self.username = username - self.password = password - self.identity_domain = identity_domain - self.container = container - self.base_url = 'https://' + identity_domain + '.storage.oraclecloud.com' - self.log = logging.getLogger(__name__) + def __init__(self, username, password, identity_domain, container): + self.username = username + self.password = password + self.identity_domain = identity_domain + self.container = container + self.base_url = 'https://' + identity_domain + '.storage.oraclecloud.com' + self.log = logging.getLogger(__name__) - # Avoid 'requests' INFO/DEBUG log messages - logging.getLogger('requests').setLevel(logging.WARNING) - logging.getLogger('urllib3').setLevel(logging.WARNING) + # Avoid 'requests' INFO/DEBUG log messages + logging.getLogger('requests').setLevel(logging.WARNING) + logging.getLogger('urllib3').setLevel(logging.WARNING) - def _fail(self, error): - raise RuntimeError('Oracle Storage Cloud API - ' + error) + def _fail(self, error): + raise RuntimeError('Oracle Storage Cloud API - ' + error) - @property - def auth_token(self): - headers = { - 'X-Storage-User': 'Storage-{id_domain}:{user}'.format( - id_domain=self.identity_domain, - user=self.username, - ), - 
'X-Storage-Pass': self.password, - } - url = self.base_url + '/auth/v1.0' - response = requests.get(url, headers=headers) - if response.status_code == 200: - return response.headers.get('x-auth-token') - else: - self._fail(response.text) + @property + def auth_token(self): + headers = { + 'X-Storage-User': 'Storage-{id_domain}:{user}'.format( + id_domain=self.identity_domain, + user=self.username, + ), + 'X-Storage-Pass': self.password, + } + url = self.base_url + '/auth/v1.0' + response = requests.get(url, headers=headers) + if response.status_code == 200: + return response.headers.get('x-auth-token') + else: + self._fail(response.text) - @property - def chunk_size(self): - file_size = os.path.getsize(self.file_path) - if file_size > int(Bytes('300MiB')): - chunk_size = int(Bytes('100MiB')) - else: - chunk_size = int(Bytes('50MiB')) - return chunk_size + @property + def chunk_size(self): + file_size = os.path.getsize(self.file_path) + if file_size > int(Bytes('300MiB')): + chunk_size = int(Bytes('100MiB')) + else: + chunk_size = int(Bytes('50MiB')) + return chunk_size - def compare_files(self): - uploaded_file_md5 = hashlib.md5() - downloaded_file_md5 = hashlib.md5() - files = [self.file_path, self.target_file_path] - hashes = [uploaded_file_md5, downloaded_file_md5] - for f, h in zip(files, hashes): - with open(f, 'rb') as current_file: - while True: - data = current_file.read(int(Bytes('8MiB'))) - if not data: - break - h.update(data) - if uploaded_file_md5.hexdigest() != downloaded_file_md5.hexdigest(): - self.log.error('File hashes mismatch') - else: - self.log.debug('Both files have the same hash') + def compare_files(self): + uploaded_file_md5 = hashlib.md5() + downloaded_file_md5 = hashlib.md5() + files = [self.file_path, self.target_file_path] + hashes = [uploaded_file_md5, downloaded_file_md5] + for f, h in zip(files, hashes): + with open(f, 'rb') as current_file: + while True: + data = current_file.read(int(Bytes('8MiB'))) + if not data: + break + 
h.update(data) + if uploaded_file_md5.hexdigest() != downloaded_file_md5.hexdigest(): + self.log.error('File hashes mismatch') + else: + self.log.debug('Both files have the same hash') - def create_manifest(self): - headers = { - 'X-Auth-Token': self.auth_token, - 'X-Object-Manifest': '{container}/{object_name}-'.format( - container=self.container, - object_name=self.file_name, - ), - 'Content-Length': '0', - } - url = self.object_url - self.log.debug('Creating remote manifest to join chunks') - response = requests.put(url, headers=headers) - if response.status_code != 201: - self._fail(response.text) + def create_manifest(self): + headers = { + 'X-Auth-Token': self.auth_token, + 'X-Object-Manifest': '{container}/{object_name}-'.format( + container=self.container, + object_name=self.file_name, + ), + 'Content-Length': '0', + } + url = self.object_url + self.log.debug('Creating remote manifest to join chunks') + response = requests.put(url, headers=headers) + if response.status_code != 201: + self._fail(response.text) - def download_file(self): - headers = { - 'X-Auth-Token': self.auth_token, - } - url = self.object_url - response = requests.get(url, headers=headers, stream=True) - if response.status_code != 200: - self._fail(response.text) - with open(self.target_file_path, 'wb') as f: - for chunk in response.iter_content(chunk_size=int(Bytes('8MiB'))): - if chunk: - f.write(chunk) + def download_file(self): + headers = { + 'X-Auth-Token': self.auth_token, + } + url = self.object_url + response = requests.get(url, headers=headers, stream=True) + if response.status_code != 200: + self._fail(response.text) + with open(self.target_file_path, 'wb') as f: + for chunk in response.iter_content(chunk_size=int(Bytes('8MiB'))): + if chunk: + f.write(chunk) - @property - def file_name(self): - return os.path.basename(self.file_path) + @property + def file_name(self): + return os.path.basename(self.file_path) - @property - def object_url(self): - url = 
'{base}/v1/Storage-{id_domain}/{container}/{object_name}'.format( - base=self.base_url, - id_domain=self.identity_domain, - container=self.container, - object_name=self.file_name, - ) - return url + @property + def object_url(self): + url = '{base}/v1/Storage-{id_domain}/{container}/{object_name}'.format( + base=self.base_url, + id_domain=self.identity_domain, + container=self.container, + object_name=self.file_name, + ) + return url - def upload_file(self): - f = open(self.file_path, 'rb') - n = 1 - while True: - chunk = f.read(self.chunk_size) - if not chunk: - break - chunk_name = '{name}-{number}'.format( - name=self.file_name, - number='{0:04d}'.format(n), - ) - headers = { - 'X-Auth-Token': self.auth_token, - } - url = '{base}/v1/Storage-{id_domain}/{container}/{object_chunk_name}'.format( - base=self.base_url, - id_domain=self.identity_domain, - container=self.container, - object_chunk_name=chunk_name, - ) - self.log.debug('Uploading chunk ' + chunk_name) - response = requests.put(url, data=chunk, headers=headers) - if response.status_code != 201: - self._fail(response.text) - n += 1 - self.create_manifest() + def upload_file(self): + f = open(self.file_path, 'rb') + n = 1 + while True: + chunk = f.read(self.chunk_size) + if not chunk: + break + chunk_name = '{name}-{number}'.format( + name=self.file_name, + number='{0:04d}'.format(n), + ) + headers = { + 'X-Auth-Token': self.auth_token, + } + url = '{base}/v1/Storage-{id_domain}/{container}/{object_chunk_name}'.format( + base=self.base_url, + id_domain=self.identity_domain, + container=self.container, + object_chunk_name=chunk_name, + ) + self.log.debug('Uploading chunk ' + chunk_name) + response = requests.put(url, data=chunk, headers=headers) + if response.status_code != 201: + self._fail(response.text) + n += 1 + self.create_manifest() diff --git a/bootstrapvz/providers/oracle/tasks/api.py b/bootstrapvz/providers/oracle/tasks/api.py index 523b928..b2bedc0 100644 --- 
a/bootstrapvz/providers/oracle/tasks/api.py +++ b/bootstrapvz/providers/oracle/tasks/api.py @@ -4,16 +4,16 @@ from bootstrapvz.providers.oracle.apiclient import OracleStorageAPIClient class Connect(Task): - description = 'Connecting to the Oracle Storage Cloud API' - phase = phases.preparation + description = 'Connecting to the Oracle Storage Cloud API' + phase = phases.preparation - @classmethod - def run(cls, info): - info._oracle['client'] = OracleStorageAPIClient( - username=info.manifest.provider['credentials']['username'], - password=info.manifest.provider['credentials']['password'], - identity_domain=info.manifest.provider['credentials']['identity-domain'], - container=info.manifest.provider['container'], - ) - # Try to fetch the token, so it will fail early if the credentials are wrong - info._oracle['client'].auth_token + @classmethod + def run(cls, info): + info._oracle['client'] = OracleStorageAPIClient( + username=info.manifest.provider['credentials']['username'], + password=info.manifest.provider['credentials']['password'], + identity_domain=info.manifest.provider['credentials']['identity-domain'], + container=info.manifest.provider['container'], + ) + # Try to fetch the token, so it will fail early if the credentials are wrong + info._oracle['client'].auth_token diff --git a/bootstrapvz/providers/oracle/tasks/image.py b/bootstrapvz/providers/oracle/tasks/image.py index 41acc4c..62fb20d 100644 --- a/bootstrapvz/providers/oracle/tasks/image.py +++ b/bootstrapvz/providers/oracle/tasks/image.py @@ -6,54 +6,54 @@ import os class CreateImageTarball(Task): - description = 'Creating tarball with image' - phase = phases.image_registration - predecessors = [image.MoveImage] + description = 'Creating tarball with image' + phase = phases.image_registration + predecessors = [image.MoveImage] - @classmethod - def run(cls, info): - image_name = info.manifest.name.format(**info.manifest_vars) - filename = image_name + '.' 
+ info.volume.extension + @classmethod + def run(cls, info): + image_name = info.manifest.name.format(**info.manifest_vars) + filename = image_name + '.' + info.volume.extension - tarball_name = image_name + '.tar.gz' - tarball_path = os.path.join(info.manifest.bootstrapper['workspace'], tarball_name) - info._oracle['tarball_path'] = tarball_path - log_check_call(['tar', '--sparse', '-C', info.manifest.bootstrapper['workspace'], - '-caf', tarball_path, filename]) + tarball_name = image_name + '.tar.gz' + tarball_path = os.path.join(info.manifest.bootstrapper['workspace'], tarball_name) + info._oracle['tarball_path'] = tarball_path + log_check_call(['tar', '--sparse', '-C', info.manifest.bootstrapper['workspace'], + '-caf', tarball_path, filename]) class UploadImageTarball(Task): - description = 'Uploading image tarball' - phase = phases.image_registration - predecessors = [CreateImageTarball] + description = 'Uploading image tarball' + phase = phases.image_registration + predecessors = [CreateImageTarball] - @classmethod - def run(cls, info): - info._oracle['client'].file_path = info._oracle['tarball_path'] - info._oracle['client'].upload_file() + @classmethod + def run(cls, info): + info._oracle['client'].file_path = info._oracle['tarball_path'] + info._oracle['client'].upload_file() class DownloadImageTarball(Task): - description = 'Downloading image tarball for integrity verification' - phase = phases.image_registration - predecessors = [UploadImageTarball] + description = 'Downloading image tarball for integrity verification' + phase = phases.image_registration + predecessors = [UploadImageTarball] - @classmethod - def run(cls, info): - tmp_tarball_path = '{tarball_path}-{pid}.tmp'.format( - tarball_path=info._oracle['tarball_path'], - pid=os.getpid(), - ) - info._oracle['client'].target_file_path = tmp_tarball_path - info._oracle['client'].download_file() + @classmethod + def run(cls, info): + tmp_tarball_path = '{tarball_path}-{pid}.tmp'.format( + 
tarball_path=info._oracle['tarball_path'], + pid=os.getpid(), + ) + info._oracle['client'].target_file_path = tmp_tarball_path + info._oracle['client'].download_file() class CompareImageTarballs(Task): - description = 'Comparing uploaded and downloaded image tarballs hashes' - phase = phases.image_registration - predecessors = [DownloadImageTarball] + description = 'Comparing uploaded and downloaded image tarballs hashes' + phase = phases.image_registration + predecessors = [DownloadImageTarball] - @classmethod - def run(cls, info): - info._oracle['client'].compare_files() - os.remove(info._oracle['client'].target_file_path) + @classmethod + def run(cls, info): + info._oracle['client'].compare_files() + os.remove(info._oracle['client'].target_file_path) diff --git a/bootstrapvz/providers/oracle/tasks/network.py b/bootstrapvz/providers/oracle/tasks/network.py index 4c32904..7cbfbfd 100644 --- a/bootstrapvz/providers/oracle/tasks/network.py +++ b/bootstrapvz/providers/oracle/tasks/network.py @@ -3,11 +3,11 @@ from bootstrapvz.common import phases class InstallDHCPCD(Task): - description = 'Replacing isc-dhcp with dhcpcd5' - phase = phases.preparation + description = 'Replacing isc-dhcp with dhcpcd5' + phase = phases.preparation - @classmethod - def run(cls, info): - info.packages.add('dhcpcd5') - info.exclude_packages.add('isc-dhcp-client') - info.exclude_packages.add('isc-dhcp-common') + @classmethod + def run(cls, info): + info.packages.add('dhcpcd5') + info.exclude_packages.add('isc-dhcp-client') + info.exclude_packages.add('isc-dhcp-common') diff --git a/bootstrapvz/providers/oracle/tasks/packages.py b/bootstrapvz/providers/oracle/tasks/packages.py index 986bf5f..66eb150 100644 --- a/bootstrapvz/providers/oracle/tasks/packages.py +++ b/bootstrapvz/providers/oracle/tasks/packages.py @@ -5,12 +5,12 @@ import os.path class DefaultPackages(Task): - description = 'Adding image packages required for Oracle Compute Cloud' - phase = phases.preparation + description = 
'Adding image packages required for Oracle Compute Cloud' + phase = phases.preparation - @classmethod - def run(cls, info): - kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.yml') - kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, - info.manifest.system['architecture']]) - info.packages.add(kernel_package) + @classmethod + def run(cls, info): + kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.yml') + kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, + info.manifest.system['architecture']]) + info.packages.add(kernel_package) diff --git a/bootstrapvz/providers/virtualbox/__init__.py b/bootstrapvz/providers/virtualbox/__init__.py index 05cc223..01e6516 100644 --- a/bootstrapvz/providers/virtualbox/__init__.py +++ b/bootstrapvz/providers/virtualbox/__init__.py @@ -5,27 +5,27 @@ from bootstrapvz.common.tasks import loopback def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) def resolve_tasks(taskset, manifest): - taskset.update(task_groups.get_standard_groups(manifest)) + taskset.update(task_groups.get_standard_groups(manifest)) - taskset.update([tasks.packages.DefaultPackages, - loopback.AddRequiredCommands, - loopback.Create, - image.MoveImage, - ]) + taskset.update([tasks.packages.DefaultPackages, + loopback.AddRequiredCommands, + loopback.Create, + image.MoveImage, + ]) - if manifest.provider.get('guest_additions', False): - from tasks import guest_additions - taskset.update([guest_additions.CheckGuestAdditionsPath, - guest_additions.AddGuestAdditionsPackages, - guest_additions.InstallGuestAdditions, - ]) + if manifest.provider.get('guest_additions', 
False): + from tasks import guest_additions + taskset.update([guest_additions.CheckGuestAdditionsPath, + guest_additions.AddGuestAdditionsPackages, + guest_additions.InstallGuestAdditions, + ]) def resolve_rollback_tasks(taskset, manifest, completed, counter_task): - taskset.update(task_groups.get_standard_rollback_tasks(completed)) + taskset.update(task_groups.get_standard_rollback_tasks(completed)) diff --git a/bootstrapvz/providers/virtualbox/tasks/guest_additions.py b/bootstrapvz/providers/virtualbox/tasks/guest_additions.py index ac69607..7b4b929 100644 --- a/bootstrapvz/providers/virtualbox/tasks/guest_additions.py +++ b/bootstrapvz/providers/virtualbox/tasks/guest_additions.py @@ -8,78 +8,78 @@ assets = os.path.normpath(os.path.join(os.path.dirname(__file__), '../assets')) class CheckGuestAdditionsPath(Task): - description = 'Checking whether the VirtualBox Guest Additions image exists' - phase = phases.preparation + description = 'Checking whether the VirtualBox Guest Additions image exists' + phase = phases.preparation - @classmethod - def run(cls, info): - guest_additions_path = info.manifest.provider['guest_additions'] - if not os.path.exists(guest_additions_path): - msg = 'The file {file} does not exist.'.format(file=guest_additions_path) - raise TaskError(msg) + @classmethod + def run(cls, info): + guest_additions_path = info.manifest.provider['guest_additions'] + if not os.path.exists(guest_additions_path): + msg = 'The file {file} does not exist.'.format(file=guest_additions_path) + raise TaskError(msg) class AddGuestAdditionsPackages(Task): - description = 'Adding packages to support Guest Additions installation' - phase = phases.package_installation - successors = [InstallPackages] + description = 'Adding packages to support Guest Additions installation' + phase = phases.package_installation + successors = [InstallPackages] - @classmethod - def run(cls, info): - info.packages.add('bzip2') - info.packages.add('build-essential') - 
info.packages.add('dkms') + @classmethod + def run(cls, info): + info.packages.add('bzip2') + info.packages.add('build-essential') + info.packages.add('dkms') - kernel_headers_pkg = 'linux-headers-' - if info.manifest.system['architecture'] == 'i386': - arch = 'i686' - kernel_headers_pkg += '686-pae' - else: - arch = 'x86_64' - kernel_headers_pkg += 'amd64' - info.packages.add(kernel_headers_pkg) - info.kernel = { - 'arch': arch, - 'headers_pkg': kernel_headers_pkg, - } + kernel_headers_pkg = 'linux-headers-' + if info.manifest.system['architecture'] == 'i386': + arch = 'i686' + kernel_headers_pkg += '686-pae' + else: + arch = 'x86_64' + kernel_headers_pkg += 'amd64' + info.packages.add(kernel_headers_pkg) + info.kernel = { + 'arch': arch, + 'headers_pkg': kernel_headers_pkg, + } class InstallGuestAdditions(Task): - description = 'Installing the VirtualBox Guest Additions' - phase = phases.package_installation - predecessors = [InstallPackages] + description = 'Installing the VirtualBox Guest Additions' + phase = phases.package_installation + predecessors = [InstallPackages] - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import log_call, log_check_call - for line in log_check_call(['chroot', info.root, 'apt-cache', 'show', info.kernel['headers_pkg']]): - key, value = line.split(':') - if key.strip() == 'Depends': - kernel_version = value.strip().split('linux-headers-')[-1] - break + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import log_call, log_check_call + for line in log_check_call(['chroot', info.root, 'apt-cache', 'show', info.kernel['headers_pkg']]): + key, value = line.split(':') + if key.strip() == 'Depends': + kernel_version = value.strip().split('linux-headers-')[-1] + break - guest_additions_path = info.manifest.provider['guest_additions'] - mount_dir = 'mnt/guest_additions' - mount_path = os.path.join(info.root, mount_dir) - os.mkdir(mount_path) - root = info.volume.partition_map.root - 
root.add_mount(guest_additions_path, mount_path, ['-o', 'loop']) - install_script = os.path.join('/', mount_dir, 'VBoxLinuxAdditions.run') - install_wrapper_name = 'install_guest_additions.sh' - install_wrapper = open(os.path.join(assets, install_wrapper_name)) \ - .read() \ - .replace("KERNEL_VERSION", kernel_version) \ - .replace("KERNEL_ARCH", info.kernel['arch']) \ - .replace("INSTALL_SCRIPT", install_script) - install_wrapper_path = os.path.join(info.root, install_wrapper_name) - with open(install_wrapper_path, 'w') as f: - f.write(install_wrapper + '\n') + guest_additions_path = info.manifest.provider['guest_additions'] + mount_dir = 'mnt/guest_additions' + mount_path = os.path.join(info.root, mount_dir) + os.mkdir(mount_path) + root = info.volume.partition_map.root + root.add_mount(guest_additions_path, mount_path, ['-o', 'loop']) + install_script = os.path.join('/', mount_dir, 'VBoxLinuxAdditions.run') + install_wrapper_name = 'install_guest_additions.sh' + install_wrapper = open(os.path.join(assets, install_wrapper_name)) \ + .read() \ + .replace("KERNEL_VERSION", kernel_version) \ + .replace("KERNEL_ARCH", info.kernel['arch']) \ + .replace("INSTALL_SCRIPT", install_script) + install_wrapper_path = os.path.join(info.root, install_wrapper_name) + with open(install_wrapper_path, 'w') as f: + f.write(install_wrapper + '\n') - # Don't check the return code of the scripts here, because 1 not necessarily means they have failed - log_call(['chroot', info.root, 'bash', '/' + install_wrapper_name]) + # Don't check the return code of the scripts here, because 1 not necessarily means they have failed + log_call(['chroot', info.root, 'bash', '/' + install_wrapper_name]) - # VBoxService process could be running, as it is not affected by DisableDaemonAutostart - log_call(['chroot', info.root, 'service', 'vboxadd-service', 'stop']) - root.remove_mount(mount_path) - os.rmdir(mount_path) - os.remove(install_wrapper_path) + # VBoxService process could be running, as it is 
not affected by DisableDaemonAutostart + log_call(['chroot', info.root, 'service', 'vboxadd-service', 'stop']) + root.remove_mount(mount_path) + os.rmdir(mount_path) + os.remove(install_wrapper_path) diff --git a/bootstrapvz/providers/virtualbox/tasks/packages.py b/bootstrapvz/providers/virtualbox/tasks/packages.py index 8bae0b8..07ce1b6 100644 --- a/bootstrapvz/providers/virtualbox/tasks/packages.py +++ b/bootstrapvz/providers/virtualbox/tasks/packages.py @@ -3,14 +3,14 @@ from bootstrapvz.common import phases class DefaultPackages(Task): - description = 'Adding image packages required for virtualbox' - phase = phases.preparation + description = 'Adding image packages required for virtualbox' + phase = phases.preparation - @classmethod - def run(cls, info): - import os.path - kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.yml') - from bootstrapvz.common.tools import config_get - kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, - info.manifest.system['architecture']]) - info.packages.add(kernel_package) + @classmethod + def run(cls, info): + import os.path + kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.yml') + from bootstrapvz.common.tools import config_get + kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, + info.manifest.system['architecture']]) + info.packages.add(kernel_package) diff --git a/bootstrapvz/remote/__init__.py b/bootstrapvz/remote/__init__.py index 35303e2..81e28dd 100644 --- a/bootstrapvz/remote/__init__.py +++ b/bootstrapvz/remote/__init__.py @@ -39,70 +39,70 @@ supported_exceptions = ['bootstrapvz.common.exceptions.ManifestError', def register_deserialization_handlers(): - for supported_class in supported_classes: - SerializerBase.register_dict_to_class(supported_class, deserialize) - for supported_exc in supported_exceptions: - SerializerBase.register_dict_to_class(supported_exc, deserialize_exception) - 
import subprocess - SerializerBase.register_class_to_dict(subprocess.CalledProcessError, serialize_called_process_error) + for supported_class in supported_classes: + SerializerBase.register_dict_to_class(supported_class, deserialize) + for supported_exc in supported_exceptions: + SerializerBase.register_dict_to_class(supported_exc, deserialize_exception) + import subprocess + SerializerBase.register_class_to_dict(subprocess.CalledProcessError, serialize_called_process_error) def unregister_deserialization_handlers(): - for supported_class in supported_classes: - SerializerBase.unregister_dict_to_class(supported_class, deserialize) - for supported_exc in supported_exceptions: - SerializerBase.unregister_dict_to_class(supported_exc, deserialize_exception) + for supported_class in supported_classes: + SerializerBase.unregister_dict_to_class(supported_class, deserialize) + for supported_exc in supported_exceptions: + SerializerBase.unregister_dict_to_class(supported_exc, deserialize_exception) def deserialize_exception(fq_classname, data): - class_object = get_class_object(fq_classname) - return SerializerBase.make_exception(class_object, data) + class_object = get_class_object(fq_classname) + return SerializerBase.make_exception(class_object, data) def deserialize(fq_classname, data): - class_object = get_class_object(fq_classname) - from Pyro4.util import SerpentSerializer - from Pyro4.errors import SecurityError - ser = SerpentSerializer() - state = {} - for key, value in data.items(): - try: - state[key] = ser.recreate_classes(value) - except SecurityError as e: - msg = 'Unable to deserialize key `{key}\' on {class_name}'.format(key=key, class_name=fq_classname) - raise Exception(msg, e) + class_object = get_class_object(fq_classname) + from Pyro4.util import SerpentSerializer + from Pyro4.errors import SecurityError + ser = SerpentSerializer() + state = {} + for key, value in data.items(): + try: + state[key] = ser.recreate_classes(value) + except SecurityError 
as e: + msg = 'Unable to deserialize key `{key}\' on {class_name}'.format(key=key, class_name=fq_classname) + raise Exception(msg, e) - instance = class_object.__new__(class_object) - instance.__setstate__(state) - return instance + instance = class_object.__new__(class_object) + instance.__setstate__(state) + return instance def serialize_called_process_error(obj): - # This is by far the weirdest exception serialization. - # There is a bug in both Pyro4 and the Python subprocess module. - # CalledProcessError does not populate its args property, - # although according to https://docs.python.org/2/library/exceptions.html#exceptions.BaseException.args - # it should... - # So we populate that property during serialization instead - # (the code is grabbed directly from Pyro4's class_to_dict()) - # However, Pyro4 still cannot figure out to call the deserializer - # unless we also use setattr() on the exception to set the args below - # (before throwing it). - # Mind you, the error "__init__() takes at least 3 arguments (2 given)" - # is thrown *on the server* if we don't use setattr(). - # It's all very confusing to me and I'm not entirely - # sure what the exact problem is. Regardless - it works, so there. - return {'__class__': obj.__class__.__module__ + '.' + obj.__class__.__name__, - '__exception__': True, - 'args': (obj.returncode, obj.cmd, obj.output), - 'attributes': vars(obj) # add custom exception attributes - } + # This is by far the weirdest exception serialization. + # There is a bug in both Pyro4 and the Python subprocess module. + # CalledProcessError does not populate its args property, + # although according to https://docs.python.org/2/library/exceptions.html#exceptions.BaseException.args + # it should... 
+ # So we populate that property during serialization instead + # (the code is grabbed directly from Pyro4's class_to_dict()) + # However, Pyro4 still cannot figure out to call the deserializer + # unless we also use setattr() on the exception to set the args below + # (before throwing it). + # Mind you, the error "__init__() takes at least 3 arguments (2 given)" + # is thrown *on the server* if we don't use setattr(). + # It's all very confusing to me and I'm not entirely + # sure what the exact problem is. Regardless - it works, so there. + return {'__class__': obj.__class__.__module__ + '.' + obj.__class__.__name__, + '__exception__': True, + 'args': (obj.returncode, obj.cmd, obj.output), + 'attributes': vars(obj) # add custom exception attributes + } def get_class_object(fq_classname): - parts = fq_classname.split('.') - module_name = '.'.join(parts[:-1]) - class_name = parts[-1] - import importlib - imported_module = importlib.import_module(module_name) - return getattr(imported_module, class_name) + parts = fq_classname.split('.') + module_name = '.'.join(parts[:-1]) + class_name = parts[-1] + import importlib + imported_module = importlib.import_module(module_name) + return getattr(imported_module, class_name) diff --git a/bootstrapvz/remote/build_servers/__init__.py b/bootstrapvz/remote/build_servers/__init__.py index f6da332..25a59b7 100644 --- a/bootstrapvz/remote/build_servers/__init__.py +++ b/bootstrapvz/remote/build_servers/__init__.py @@ -1,46 +1,46 @@ def pick_build_server(build_servers, manifest, preferences={}): - # Validate the build servers list - from bootstrapvz.common.tools import load_data - import os.path - schema = load_data(os.path.normpath(os.path.join(os.path.dirname(__file__), 'build-servers-schema.yml'))) - import jsonschema - jsonschema.validate(build_servers, schema) + # Validate the build servers list + from bootstrapvz.common.tools import load_data + import os.path + schema = 
load_data(os.path.normpath(os.path.join(os.path.dirname(__file__), 'build-servers-schema.yml'))) + import jsonschema + jsonschema.validate(build_servers, schema) - if manifest['provider']['name'] == 'ec2': - must_bootstrap = 'ec2-' + manifest['volume']['backing'] - else: - must_bootstrap = manifest['provider']['name'] + if manifest['provider']['name'] == 'ec2': + must_bootstrap = 'ec2-' + manifest['volume']['backing'] + else: + must_bootstrap = manifest['provider']['name'] - def matches(name, settings): - if preferences.get('name', name) != name: - return False - if preferences.get('release', settings['release']) != settings['release']: - return False - if must_bootstrap not in settings['can_bootstrap']: - return False - return True + def matches(name, settings): + if preferences.get('name', name) != name: + return False + if preferences.get('release', settings['release']) != settings['release']: + return False + if must_bootstrap not in settings['can_bootstrap']: + return False + return True - for name, settings in build_servers.iteritems(): - if not matches(name, settings): - continue - if settings['type'] == 'local': - from local import LocalBuildServer - return LocalBuildServer(name, settings) - else: - from remote import RemoteBuildServer - return RemoteBuildServer(name, settings) - raise Exception('Unable to find a build server that matches your preferences.') + for name, settings in build_servers.iteritems(): + if not matches(name, settings): + continue + if settings['type'] == 'local': + from local import LocalBuildServer + return LocalBuildServer(name, settings) + else: + from remote import RemoteBuildServer + return RemoteBuildServer(name, settings) + raise Exception('Unable to find a build server that matches your preferences.') def getNPorts(n, port_range=(1024, 65535)): - import random - ports = [] - for i in range(0, n): - while True: - port = random.randrange(*port_range) - if port not in ports: - ports.append(port) - break - return ports + import 
random + ports = [] + for i in range(0, n): + while True: + port = random.randrange(*port_range) + if port not in ports: + ports.append(port) + break + return ports diff --git a/bootstrapvz/remote/build_servers/build_server.py b/bootstrapvz/remote/build_servers/build_server.py index 3e635a8..14bdfe4 100644 --- a/bootstrapvz/remote/build_servers/build_server.py +++ b/bootstrapvz/remote/build_servers/build_server.py @@ -2,26 +2,26 @@ class BuildServer(object): - def __init__(self, name, settings): - self.name = name - self.settings = settings - self.build_settings = settings.get('build_settings', {}) - self.run_settings = settings.get('run_settings', {}) - self.can_bootstrap = settings['can_bootstrap'] - self.release = settings.get('release', None) + def __init__(self, name, settings): + self.name = name + self.settings = settings + self.build_settings = settings.get('build_settings', {}) + self.run_settings = settings.get('run_settings', {}) + self.can_bootstrap = settings['can_bootstrap'] + self.release = settings.get('release', None) - def apply_build_settings(self, manifest_data): - if manifest_data['provider']['name'] == 'virtualbox' and 'guest_additions' in manifest_data['provider']: - manifest_data['provider']['guest_additions'] = self.build_settings['guest_additions'] - if 'apt_proxy' in self.build_settings: - manifest_data.get('plugins', {})['apt_proxy'] = self.build_settings['apt_proxy'] - if 'ec2-credentials' in self.build_settings: - if 'credentials' not in manifest_data['provider']: - manifest_data['provider']['credentials'] = {} - for key in ['access-key', 'secret-key', 'certificate', 'private-key', 'user-id']: - if key in self.build_settings['ec2-credentials']: - manifest_data['provider']['credentials'][key] = self.build_settings['ec2-credentials'][key] - if 's3-region' in self.build_settings and manifest_data['volume']['backing'] == 's3': - if 'region' not in manifest_data['image']: - manifest_data['image']['region'] = self.build_settings['s3-region'] 
- return manifest_data + def apply_build_settings(self, manifest_data): + if manifest_data['provider']['name'] == 'virtualbox' and 'guest_additions' in manifest_data['provider']: + manifest_data['provider']['guest_additions'] = self.build_settings['guest_additions'] + if 'apt_proxy' in self.build_settings: + manifest_data.get('plugins', {})['apt_proxy'] = self.build_settings['apt_proxy'] + if 'ec2-credentials' in self.build_settings: + if 'credentials' not in manifest_data['provider']: + manifest_data['provider']['credentials'] = {} + for key in ['access-key', 'secret-key', 'certificate', 'private-key', 'user-id']: + if key in self.build_settings['ec2-credentials']: + manifest_data['provider']['credentials'][key] = self.build_settings['ec2-credentials'][key] + if 's3-region' in self.build_settings and manifest_data['volume']['backing'] == 's3': + if 'region' not in manifest_data['image']: + manifest_data['image']['region'] = self.build_settings['s3-region'] + return manifest_data diff --git a/bootstrapvz/remote/build_servers/callback.py b/bootstrapvz/remote/build_servers/callback.py index 2df5b82..40223ba 100644 --- a/bootstrapvz/remote/build_servers/callback.py +++ b/bootstrapvz/remote/build_servers/callback.py @@ -7,31 +7,31 @@ log = logging.getLogger(__name__) class CallbackServer(object): - def __init__(self, listen_port, remote_port): - self.daemon = Pyro4.Daemon(host='localhost', port=listen_port, - nathost='localhost', natport=remote_port, - unixsocket=None) - self.daemon.register(self) + def __init__(self, listen_port, remote_port): + self.daemon = Pyro4.Daemon(host='localhost', port=listen_port, + nathost='localhost', natport=remote_port, + unixsocket=None) + self.daemon.register(self) - def __enter__(self): - def serve(): - self.daemon.requestLoop() - from threading import Thread - self.thread = Thread(target=serve) - log.debug('Starting callback server') - self.thread.start() - return self + def __enter__(self): + def serve(): + self.daemon.requestLoop() 
+ from threading import Thread + self.thread = Thread(target=serve) + log.debug('Starting callback server') + self.thread.start() + return self - def __exit__(self, type, value, traceback): - log.debug('Shutting down callback server') - self.daemon.shutdown() - self.thread.join() + def __exit__(self, type, value, traceback): + log.debug('Shutting down callback server') + self.daemon.shutdown() + self.thread.join() - @Pyro4.expose - def handle_log(self, pickled_record): - import pickle - record = pickle.loads(pickled_record) - log = logging.getLogger() - record.extra = getattr(record, 'extra', {}) - record.extra['source'] = 'remote' - log.handle(record) + @Pyro4.expose + def handle_log(self, pickled_record): + import pickle + record = pickle.loads(pickled_record) + log = logging.getLogger() + record.extra = getattr(record, 'extra', {}) + record.extra['source'] = 'remote' + log.handle(record) diff --git a/bootstrapvz/remote/build_servers/local.py b/bootstrapvz/remote/build_servers/local.py index 0d29943..db2dec6 100644 --- a/bootstrapvz/remote/build_servers/local.py +++ b/bootstrapvz/remote/build_servers/local.py @@ -4,13 +4,13 @@ from contextlib import contextmanager class LocalBuildServer(BuildServer): - @contextmanager - def connect(self): - yield LocalConnection() + @contextmanager + def connect(self): + yield LocalConnection() class LocalConnection(object): - def run(self, *args, **kwargs): - from bootstrapvz.base.main import run - return run(*args, **kwargs) + def run(self, *args, **kwargs): + from bootstrapvz.base.main import run + return run(*args, **kwargs) diff --git a/bootstrapvz/remote/build_servers/remote.py b/bootstrapvz/remote/build_servers/remote.py index c117c64..f6b5449 100644 --- a/bootstrapvz/remote/build_servers/remote.py +++ b/bootstrapvz/remote/build_servers/remote.py @@ -7,124 +7,124 @@ log = logging.getLogger(__name__) class RemoteBuildServer(BuildServer): - def __init__(self, name, settings): - super(RemoteBuildServer, self).__init__(name, 
settings) - self.address = settings['address'] - self.port = settings['port'] - self.username = settings['username'] - self.password = settings.get('password', None) - self.keyfile = settings['keyfile'] - self.server_bin = settings['server_bin'] + def __init__(self, name, settings): + super(RemoteBuildServer, self).__init__(name, settings) + self.address = settings['address'] + self.port = settings['port'] + self.username = settings['username'] + self.password = settings.get('password', None) + self.keyfile = settings['keyfile'] + self.server_bin = settings['server_bin'] - @contextmanager - def connect(self): - with self.spawn_server() as forwards: - args = {'listen_port': forwards['local_callback_port'], - 'remote_port': forwards['remote_callback_port']} - from callback import CallbackServer - with CallbackServer(**args) as callback_server: - with connect_pyro('localhost', forwards['local_server_port']) as connection: - connection.set_callback_server(callback_server) - yield connection + @contextmanager + def connect(self): + with self.spawn_server() as forwards: + args = {'listen_port': forwards['local_callback_port'], + 'remote_port': forwards['remote_callback_port']} + from callback import CallbackServer + with CallbackServer(**args) as callback_server: + with connect_pyro('localhost', forwards['local_server_port']) as connection: + connection.set_callback_server(callback_server) + yield connection - @contextmanager - def spawn_server(self): - from . import getNPorts - # We can't use :0 for the forwarding ports because - # A: It's quite hard to retrieve the port on the remote after the daemon has started - # B: SSH doesn't accept 0:localhost:0 as a port forwarding option - [local_server_port, local_callback_port] = getNPorts(2) - [remote_server_port, remote_callback_port] = getNPorts(2) + @contextmanager + def spawn_server(self): + from . 
import getNPorts + # We can't use :0 for the forwarding ports because + # A: It's quite hard to retrieve the port on the remote after the daemon has started + # B: SSH doesn't accept 0:localhost:0 as a port forwarding option + [local_server_port, local_callback_port] = getNPorts(2) + [remote_server_port, remote_callback_port] = getNPorts(2) - server_cmd = ['sudo', self.server_bin, '--listen', str(remote_server_port)] + server_cmd = ['sudo', self.server_bin, '--listen', str(remote_server_port)] - def set_process_group(): - # Changes the process group of a command so that any SIGINT - # for the main thread will not be propagated to it. - # We'd like to handle SIGINT ourselves (i.e. propagate the shutdown to the serverside) - import os - os.setpgrp() + def set_process_group(): + # Changes the process group of a command so that any SIGINT + # for the main thread will not be propagated to it. + # We'd like to handle SIGINT ourselves (i.e. propagate the shutdown to the serverside) + import os + os.setpgrp() - addr_arg = '{user}@{host}'.format(user=self.username, host=self.address) - ssh_cmd = ['ssh', '-i', self.keyfile, - '-p', str(self.port), - '-L' + str(local_server_port) + ':localhost:' + str(remote_server_port), - '-R' + str(remote_callback_port) + ':localhost:' + str(local_callback_port), - addr_arg] - full_cmd = ssh_cmd + ['--'] + server_cmd + addr_arg = '{user}@{host}'.format(user=self.username, host=self.address) + ssh_cmd = ['ssh', '-i', self.keyfile, + '-p', str(self.port), + '-L' + str(local_server_port) + ':localhost:' + str(remote_server_port), + '-R' + str(remote_callback_port) + ':localhost:' + str(local_callback_port), + addr_arg] + full_cmd = ssh_cmd + ['--'] + server_cmd - log.debug('Opening SSH connection to build server `{name}\''.format(name=self.name)) - import sys - import subprocess - ssh_process = subprocess.Popen(args=full_cmd, stdout=sys.stderr, stderr=sys.stderr, - preexec_fn=set_process_group) - try: - yield {'local_server_port': 
local_server_port, - 'local_callback_port': local_callback_port, - 'remote_server_port': remote_server_port, - 'remote_callback_port': remote_callback_port} - finally: - log.debug('Waiting for SSH connection to the build server to close') - import time - start = time.time() - while ssh_process.poll() is None: - if time.time() - start > 5: - log.debug('Forcefully terminating SSH connection to the build server') - ssh_process.terminate() - break - else: - time.sleep(0.5) + log.debug('Opening SSH connection to build server `{name}\''.format(name=self.name)) + import sys + import subprocess + ssh_process = subprocess.Popen(args=full_cmd, stdout=sys.stderr, stderr=sys.stderr, + preexec_fn=set_process_group) + try: + yield {'local_server_port': local_server_port, + 'local_callback_port': local_callback_port, + 'remote_server_port': remote_server_port, + 'remote_callback_port': remote_callback_port} + finally: + log.debug('Waiting for SSH connection to the build server to close') + import time + start = time.time() + while ssh_process.poll() is None: + if time.time() - start > 5: + log.debug('Forcefully terminating SSH connection to the build server') + ssh_process.terminate() + break + else: + time.sleep(0.5) - def download(self, src, dst): - log.debug('Downloading file `{src}\' from ' - 'build server `{name}\' to `{dst}\'' - .format(src=src, dst=dst, name=self.name)) - # Make sure we can read the file as {user} - self.remote_command(['sudo', 'chown', self.username, src]) - src_arg = '{user}@{host}:{path}'.format(user=self.username, host=self.address, path=src) - log_check_call(['scp', '-i', self.keyfile, '-P', str(self.port), - src_arg, dst]) + def download(self, src, dst): + log.debug('Downloading file `{src}\' from ' + 'build server `{name}\' to `{dst}\'' + .format(src=src, dst=dst, name=self.name)) + # Make sure we can read the file as {user} + self.remote_command(['sudo', 'chown', self.username, src]) + src_arg = '{user}@{host}:{path}'.format(user=self.username, 
host=self.address, path=src) + log_check_call(['scp', '-i', self.keyfile, '-P', str(self.port), + src_arg, dst]) - def delete(self, path): - log.debug('Deleting file `{path}\' on build server `{name}\''.format(path=path, name=self.name)) - self.remote_command(['sudo', 'rm', path]) + def delete(self, path): + log.debug('Deleting file `{path}\' on build server `{name}\''.format(path=path, name=self.name)) + self.remote_command(['sudo', 'rm', path]) - def remote_command(self, command): - ssh_cmd = ['ssh', '-i', self.keyfile, - '-p', str(self.port), - self.username + '@' + self.address, - '--'] + command - log_check_call(ssh_cmd) + def remote_command(self, command): + ssh_cmd = ['ssh', '-i', self.keyfile, + '-p', str(self.port), + self.username + '@' + self.address, + '--'] + command + log_check_call(ssh_cmd) @contextmanager def connect_pyro(host, port): - import Pyro4 - server_uri = 'PYRO:server@{host}:{port}'.format(host=host, port=port) - connection = Pyro4.Proxy(server_uri) + import Pyro4 + server_uri = 'PYRO:server@{host}:{port}'.format(host=host, port=port) + connection = Pyro4.Proxy(server_uri) - log.debug('Connecting to RPC daemon') + log.debug('Connecting to RPC daemon') - connected = False - try: - remaining_retries = 5 - while not connected: - try: - connection.ping() - connected = True - except (Pyro4.errors.ConnectionClosedError, Pyro4.errors.CommunicationError): - if remaining_retries > 0: - remaining_retries -= 1 - from time import sleep - sleep(2) - else: - raise + connected = False + try: + remaining_retries = 5 + while not connected: + try: + connection.ping() + connected = True + except (Pyro4.errors.ConnectionClosedError, Pyro4.errors.CommunicationError): + if remaining_retries > 0: + remaining_retries -= 1 + from time import sleep + sleep(2) + else: + raise - yield connection - finally: - if connected: - log.debug('Stopping RPC daemon') - connection.stop() - connection._pyroRelease() - else: - log.warn('Unable to stop RPC daemon, it might still be 
running on the server') + yield connection + finally: + if connected: + log.debug('Stopping RPC daemon') + connection.stop() + connection._pyroRelease() + else: + log.warn('Unable to stop RPC daemon, it might still be running on the server') diff --git a/bootstrapvz/remote/log.py b/bootstrapvz/remote/log.py index fc7d66d..1e59ff9 100644 --- a/bootstrapvz/remote/log.py +++ b/bootstrapvz/remote/log.py @@ -3,21 +3,21 @@ import logging class LogForwarder(logging.Handler): - def __init__(self, level=logging.NOTSET): - self.server = None - super(LogForwarder, self).__init__(level) + def __init__(self, level=logging.NOTSET): + self.server = None + super(LogForwarder, self).__init__(level) - def set_server(self, server): - self.server = server + def set_server(self, server): + self.server = server - def emit(self, record): - if self.server is not None: - if record.exc_info is not None: - import traceback - exc_type, exc_value, exc_traceback = record.exc_info - record.extra = getattr(record, 'extra', {}) - record.extra['traceback'] = traceback.format_exception(exc_type, exc_value, exc_traceback) - record.exc_info = None - # TODO: Use serpent instead - import pickle - self.server.handle_log(pickle.dumps(record)) + def emit(self, record): + if self.server is not None: + if record.exc_info is not None: + import traceback + exc_type, exc_value, exc_traceback = record.exc_info + record.extra = getattr(record, 'extra', {}) + record.extra['traceback'] = traceback.format_exception(exc_type, exc_value, exc_traceback) + record.exc_info = None + # TODO: Use serpent instead + import pickle + self.server.handle_log(pickle.dumps(record)) diff --git a/bootstrapvz/remote/main.py b/bootstrapvz/remote/main.py index 9613088..d48c98d 100644 --- a/bootstrapvz/remote/main.py +++ b/bootstrapvz/remote/main.py @@ -3,54 +3,54 @@ def main(): - """Main function for invoking the bootstrap process remotely - """ - # Get the commandline arguments - opts = get_opts() + """Main function for invoking the 
bootstrap process remotely + """ + # Get the commandline arguments + opts = get_opts() - from bootstrapvz.common.tools import load_data - # load the manifest data, we might want to modify it later on - manifest_data = load_data(opts['MANIFEST']) + from bootstrapvz.common.tools import load_data + # load the manifest data, we might want to modify it later on + manifest_data = load_data(opts['MANIFEST']) - # load the build servers file - build_servers = load_data(opts['--servers']) - # Pick a build server - from build_servers import pick_build_server - preferences = {} - if opts['--name'] is not None: - preferences['name'] = opts['--name'] - if opts['--release'] is not None: - preferences['release'] = opts['--release'] - build_server = pick_build_server(build_servers, manifest_data, preferences) + # load the build servers file + build_servers = load_data(opts['--servers']) + # Pick a build server + from build_servers import pick_build_server + preferences = {} + if opts['--name'] is not None: + preferences['name'] = opts['--name'] + if opts['--release'] is not None: + preferences['release'] = opts['--release'] + build_server = pick_build_server(build_servers, manifest_data, preferences) - # Apply the build server settings to the manifest (e.g. the virtualbox guest additions path) - manifest_data = build_server.apply_build_settings(manifest_data) + # Apply the build server settings to the manifest (e.g. 
the virtualbox guest additions path) + manifest_data = build_server.apply_build_settings(manifest_data) - # Load the manifest - from bootstrapvz.base.manifest import Manifest - manifest = Manifest(path=opts['MANIFEST'], data=manifest_data) + # Load the manifest + from bootstrapvz.base.manifest import Manifest + manifest = Manifest(path=opts['MANIFEST'], data=manifest_data) - # Set up logging - from bootstrapvz.base.main import setup_loggers - setup_loggers(opts) + # Set up logging + from bootstrapvz.base.main import setup_loggers + setup_loggers(opts) - # Register deserialization handlers for objects - # that will pass between server and client - from . import register_deserialization_handlers - register_deserialization_handlers() + # Register deserialization handlers for objects + # that will pass between server and client + from . import register_deserialization_handlers + register_deserialization_handlers() - # Everything has been set up, connect to the server and begin the bootstrapping process - with build_server.connect() as connection: - connection.run(manifest, - debug=opts['--debug'], - dry_run=opts['--dry-run']) + # Everything has been set up, connect to the server and begin the bootstrapping process + with build_server.connect() as connection: + connection.run(manifest, + debug=opts['--debug'], + dry_run=opts['--dry-run']) def get_opts(): - """Creates an argument parser and returns the arguments it has parsed - """ - from docopt import docopt - usage = """bootstrap-vz-remote + """Creates an argument parser and returns the arguments it has parsed + """ + from docopt import docopt + usage = """bootstrap-vz-remote Usage: bootstrap-vz-remote [options] --servers= MANIFEST @@ -66,5 +66,5 @@ Options: Colorize the console output [default: auto] --debug Print debugging information -h, --help show this help - """ - return docopt(usage) + """ + return docopt(usage) diff --git a/bootstrapvz/remote/server.py b/bootstrapvz/remote/server.py index bf37cbf..92b2095 
100644 --- a/bootstrapvz/remote/server.py +++ b/bootstrapvz/remote/server.py @@ -6,37 +6,37 @@ log = logging.getLogger(__name__) def main(): - opts = getopts() - from . import register_deserialization_handlers - register_deserialization_handlers() - log_forwarder = setup_logging() - server = Server(opts['--listen'], log_forwarder) - server.start() + opts = getopts() + from . import register_deserialization_handlers + register_deserialization_handlers() + log_forwarder = setup_logging() + server = Server(opts['--listen'], log_forwarder) + server.start() def setup_logging(): - root = logging.getLogger() - root.setLevel(logging.NOTSET) + root = logging.getLogger() + root.setLevel(logging.NOTSET) - from log import LogForwarder - log_forwarder = LogForwarder() - root.addHandler(log_forwarder) + from log import LogForwarder + log_forwarder = LogForwarder() + root.addHandler(log_forwarder) - from datetime import datetime - import os.path - from bootstrapvz.base.log import get_file_handler - timestamp = datetime.now().strftime('%Y%m%d%H%M%S') - filename = '{timestamp}_remote.log'.format(timestamp=timestamp) - logfile_path = os.path.join('/var/log/bootstrap-vz', filename) - file_handler = get_file_handler(logfile_path, True) - root.addHandler(file_handler) + from datetime import datetime + import os.path + from bootstrapvz.base.log import get_file_handler + timestamp = datetime.now().strftime('%Y%m%d%H%M%S') + filename = '{timestamp}_remote.log'.format(timestamp=timestamp) + logfile_path = os.path.join('/var/log/bootstrap-vz', filename) + file_handler = get_file_handler(logfile_path, True) + root.addHandler(file_handler) - return log_forwarder + return log_forwarder def getopts(): - from docopt import docopt - usage = """bootstrap-vz-server + from docopt import docopt + usage = """bootstrap-vz-server Usage: bootstrap-vz-server [options] @@ -44,87 +44,87 @@ Options: --listen Serve on specified port [default: 46675] -h, --help show this help """ - return docopt(usage) + 
return docopt(usage) class Server(object): - def __init__(self, listen_port, log_forwarder): - self.stop_serving = False - self.log_forwarder = log_forwarder - self.listen_port = listen_port + def __init__(self, listen_port, log_forwarder): + self.stop_serving = False + self.log_forwarder = log_forwarder + self.listen_port = listen_port - def start(self): - Pyro4.config.COMMTIMEOUT = 0.5 - daemon = Pyro4.Daemon('localhost', port=int(self.listen_port), unixsocket=None) - daemon.register(self, 'server') + def start(self): + Pyro4.config.COMMTIMEOUT = 0.5 + daemon = Pyro4.Daemon('localhost', port=int(self.listen_port), unixsocket=None) + daemon.register(self, 'server') - daemon.requestLoop(loopCondition=lambda: not self.stop_serving) + daemon.requestLoop(loopCondition=lambda: not self.stop_serving) - @Pyro4.expose - def set_callback_server(self, server): - log.debug('Forwarding logs to the callback server') - self.log_forwarder.set_server(server) + @Pyro4.expose + def set_callback_server(self, server): + log.debug('Forwarding logs to the callback server') + self.log_forwarder.set_server(server) - @Pyro4.expose - def ping(self): - if hasattr(self, 'connection_timeout'): - self.connection_timeout.cancel() - del self.connection_timeout - return 'pong' + @Pyro4.expose + def ping(self): + if hasattr(self, 'connection_timeout'): + self.connection_timeout.cancel() + del self.connection_timeout + return 'pong' - @Pyro4.expose - def stop(self): - if hasattr(self, 'bootstrap_process'): - log.warn('Sending SIGINT to bootstrapping process') - import os - import signal - os.killpg(self.bootstrap_process.pid, signal.SIGINT) - self.bootstrap_process.join() + @Pyro4.expose + def stop(self): + if hasattr(self, 'bootstrap_process'): + log.warn('Sending SIGINT to bootstrapping process') + import os + import signal + os.killpg(self.bootstrap_process.pid, signal.SIGINT) + self.bootstrap_process.join() - # We can't send a SIGINT to the server, - # for some reason the Pyro4 shutdowns are 
rather unclean, - # throwing exceptions and such. - self.stop_serving = True + # We can't send a SIGINT to the server, + # for some reason the Pyro4 shutdowns are rather unclean, + # throwing exceptions and such. + self.stop_serving = True - @Pyro4.expose - def run(self, manifest, debug=False, dry_run=False): + @Pyro4.expose + def run(self, manifest, debug=False, dry_run=False): - def bootstrap(queue): - # setsid() creates a new session, making this process the group leader. - # We do that, so when the server calls killpg (kill process group) - # on us, it won't kill itself (this process was spawned from a - # thread under the server, meaning it's part of the same group). - # The process hierarchy looks like this: - # Pyro server (process - listening on a port) - # +- pool thread - # +- pool thread - # +- pool thread - # +- started thread (the one that got the "run()" call) - # L bootstrap() process (us) - # Calling setsid() also fixes another problem: - # SIGINTs sent to this process seem to be redirected - # to the process leader. Since there is a thread between - # us and the process leader, the signal will not be propagated - # (signals are not propagated to threads), this means that any - # subprocess we start (i.e. debootstrap) will not get a SIGINT. - import os - os.setsid() - from bootstrapvz.base.main import run - try: - bootstrap_info = run(manifest, debug=debug, dry_run=dry_run) - queue.put(bootstrap_info) - except (Exception, KeyboardInterrupt) as e: - queue.put(e) + def bootstrap(queue): + # setsid() creates a new session, making this process the group leader. + # We do that, so when the server calls killpg (kill process group) + # on us, it won't kill itself (this process was spawned from a + # thread under the server, meaning it's part of the same group). 
+ # The process hierarchy looks like this: + # Pyro server (process - listening on a port) + # +- pool thread + # +- pool thread + # +- pool thread + # +- started thread (the one that got the "run()" call) + # L bootstrap() process (us) + # Calling setsid() also fixes another problem: + # SIGINTs sent to this process seem to be redirected + # to the process leader. Since there is a thread between + # us and the process leader, the signal will not be propagated + # (signals are not propagated to threads), this means that any + # subprocess we start (i.e. debootstrap) will not get a SIGINT. + import os + os.setsid() + from bootstrapvz.base.main import run + try: + bootstrap_info = run(manifest, debug=debug, dry_run=dry_run) + queue.put(bootstrap_info) + except (Exception, KeyboardInterrupt) as e: + queue.put(e) - from multiprocessing import Queue - from multiprocessing import Process - queue = Queue() - self.bootstrap_process = Process(target=bootstrap, args=(queue,)) - self.bootstrap_process.start() - self.bootstrap_process.join() - del self.bootstrap_process - result = queue.get() - if isinstance(result, Exception): - raise result - return result + from multiprocessing import Queue + from multiprocessing import Process + queue = Queue() + self.bootstrap_process = Process(target=bootstrap, args=(queue,)) + self.bootstrap_process.start() + self.bootstrap_process.join() + del self.bootstrap_process + result = queue.get() + if isinstance(result, Exception): + raise result + return result diff --git a/docs/conf.py b/docs/conf.py index 439eb30..594bac2 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -108,9 +108,9 @@ pygments_style = 'sphinx' on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if not on_rtd: # only import and set the theme if we're building docs locally - import sphinx_rtd_theme - html_theme = 'sphinx_rtd_theme' - html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + import sphinx_rtd_theme + html_theme = 'sphinx_rtd_theme' + html_theme_path = 
[sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -270,30 +270,30 @@ import glob import os.path for readme_path in glob.glob('../bootstrapvz/providers/*/README.rst'): - provider_name = os.path.basename(os.path.dirname(readme_path)) - include_path = os.path.join('providers', provider_name + '.rst') - if not os.path.exists(include_path): - path_to_readme = os.path.join('../../bootstrapvz/providers', provider_name, 'README.rst') - with open(include_path, 'w') as include: - include.write('.. include:: ' + path_to_readme) + provider_name = os.path.basename(os.path.dirname(readme_path)) + include_path = os.path.join('providers', provider_name + '.rst') + if not os.path.exists(include_path): + path_to_readme = os.path.join('../../bootstrapvz/providers', provider_name, 'README.rst') + with open(include_path, 'w') as include: + include.write('.. include:: ' + path_to_readme) for readme_path in glob.glob('../bootstrapvz/plugins/*/README.rst'): - plugin_name = os.path.basename(os.path.dirname(readme_path)) - include_path = os.path.join('plugins', plugin_name + '.rst') - if not os.path.exists(include_path): - path_to_readme = os.path.join('../../bootstrapvz/plugins', plugin_name, 'README.rst') - with open(include_path, 'w') as include: - include.write('.. include:: ' + path_to_readme) + plugin_name = os.path.basename(os.path.dirname(readme_path)) + include_path = os.path.join('plugins', plugin_name + '.rst') + if not os.path.exists(include_path): + path_to_readme = os.path.join('../../bootstrapvz/plugins', plugin_name, 'README.rst') + with open(include_path, 'w') as include: + include.write('.. 
include:: ' + path_to_readme) for readme_path in glob.glob('../tests/system/providers/*/README.rst'): - provider_name = os.path.basename(os.path.dirname(readme_path)) - include_path = os.path.join('testing/system_test_providers', provider_name + '.rst') - if not os.path.exists(include_path): - path_to_readme = os.path.join('../../../tests/system/providers', provider_name, 'README.rst') - with open(include_path, 'w') as include: - include.write('.. include:: ' + path_to_readme) + provider_name = os.path.basename(os.path.dirname(readme_path)) + include_path = os.path.join('testing/system_test_providers', provider_name + '.rst') + if not os.path.exists(include_path): + path_to_readme = os.path.join('../../../tests/system/providers', provider_name, 'README.rst') + with open(include_path, 'w') as include: + include.write('.. include:: ' + path_to_readme) # -- Create task overview graph data -------------------------------------- @@ -308,7 +308,7 @@ taskoverview.write_data(data, '_static/graph.json') if on_rtd: - pass + pass # Snatched from here: # https://sourcegraph.com/github.com/Gallopsled/pwntools@master/.PipPackage/pwntools/.def/docs/source/conf/linkcode_resolve/lines @@ -316,43 +316,43 @@ baseurl = 'https://github.com/andsens/bootstrap-vz' import subprocess try: - git_head = subprocess.check_output('git describe --tags 2>/dev/null', shell=True) + git_head = subprocess.check_output('git describe --tags 2>/dev/null', shell=True) except subprocess.CalledProcessError: - try: - git_head = subprocess.check_output('git rev-parse HEAD', shell=True).strip()[:10] - except subprocess.CalledProcessError: - pass + try: + git_head = subprocess.check_output('git rev-parse HEAD', shell=True).strip()[:10] + except subprocess.CalledProcessError: + pass def linkcode_resolve(domain, info): - if domain != 'py': - return None - if not info['module']: - return None + if domain != 'py': + return None + if not info['module']: + return None - filepath = info['module'].replace('.', '/') + 
'.py' - fmt_args = {'baseurl': baseurl, - 'commit': git_head, - 'path': filepath} + filepath = info['module'].replace('.', '/') + '.py' + fmt_args = {'baseurl': baseurl, + 'commit': git_head, + 'path': filepath} - import importlib - import inspect - import types - module = importlib.import_module(info['module']) - value = module - for part in info['fullname'].split('.'): - value = getattr(value, part, None) - if value is None: - break - valid_types = (types.ModuleType, types.ClassType, types.MethodType, - types.FunctionType, types.TracebackType, - types.FrameType, types.CodeType) - if isinstance(value, valid_types): - try: - lines, first = inspect.getsourcelines(value) - fmt_args['linestart'] = first - fmt_args['lineend'] = first + len(lines) - 1 - return '{baseurl}/blob/{commit}/{path}#L{linestart}-L{lineend}'.format(**fmt_args) - except IOError: - pass - return '{baseurl}/blob/{commit}/{path}'.format(**fmt_args) + import importlib + import inspect + import types + module = importlib.import_module(info['module']) + value = module + for part in info['fullname'].split('.'): + value = getattr(value, part, None) + if value is None: + break + valid_types = (types.ModuleType, types.ClassType, types.MethodType, + types.FunctionType, types.TracebackType, + types.FrameType, types.CodeType) + if isinstance(value, valid_types): + try: + lines, first = inspect.getsourcelines(value) + fmt_args['linestart'] = first + fmt_args['lineend'] = first + len(lines) - 1 + return '{baseurl}/blob/{commit}/{path}#L{linestart}-L{lineend}'.format(**fmt_args) + except IOError: + pass + return '{baseurl}/blob/{commit}/{path}'.format(**fmt_args) diff --git a/docs/developers/plugins.rst b/docs/developers/plugins.rst index d8a2524..a8ff8e0 100644 --- a/docs/developers/plugins.rst +++ b/docs/developers/plugins.rst @@ -14,8 +14,8 @@ This function adds tasks to be run to the tasklist: .. 
code-block:: python - def resolve_tasks(taskset, manifest): - taskset.add(tasks.DoSomething) + def resolve_tasks(taskset, manifest): + taskset.add(tasks.DoSomething) The manifest variable holds the manifest the user specified, with it you can determine settings for your plugin and e.g. @@ -25,15 +25,15 @@ A task is a class with a static ``run()`` function and some meta-information: .. code-block:: python - class DoSomething(Task): - description = 'Doing something' - phase = phases.volume_preparation - predecessors = [PartitionVolume] - successors = [filesystem.Format] + class DoSomething(Task): + description = 'Doing something' + phase = phases.volume_preparation + predecessors = [PartitionVolume] + successors = [filesystem.Format] - @classmethod - def run(cls, info): - pass + @classmethod + def run(cls, info): + pass To read more about tasks and their ordering, check out the section on `how bootstrap-vz works `__. @@ -48,8 +48,8 @@ you run it after an image has been successfully bootstrapped: .. code-block:: python - def resolve_rollback_tasks(taskset, manifest, completed, counter_task): - counter_task(taskset, tasks.DoSomething, tasks.UndoSomething) + def resolve_rollback_tasks(taskset, manifest, completed, counter_task): + counter_task(taskset, tasks.DoSomething, tasks.UndoSomething) In ``resolve_rollback_tasks()`` you have access to the taskset (this time it contains tasks that will be run during rollback), the manifest, and @@ -65,10 +65,10 @@ Typically it looks like this: .. code-block:: python - def validate_manifest(data, validator, error): - import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) - validator(data, schema_path) + def validate_manifest(data, validator, error): + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) This code validates the manifest against a schema in your plugin folder. 
The schema is a `JSON schema `__, since bootstrap-vz @@ -112,21 +112,21 @@ They integrate with bootstrap-vz by exposing an entry-point through ``setup.py`` .. code-block:: python - setup(name='example-plugin', - version=0.9.5, - packages=find_packages(), - include_package_data=True, - entry_points={'bootstrapvz.plugins': ['plugin_name = package_name.module_name']}, - install_requires=['bootstrap-vz >= 0.9.5'], - ) + setup(name='example-plugin', + version=0.9.5, + packages=find_packages(), + include_package_data=True, + entry_points={'bootstrapvz.plugins': ['plugin_name = package_name.module_name']}, + install_requires=['bootstrap-vz >= 0.9.5'], + ) Beyond ``setup.py`` the package might need a ``MANIFEST.in`` so that assets like ``manifest-schema.yml`` are included when the package is built: .. code-block:: text - include example/manifest-schema.yml - include example/README.rst + include example/manifest-schema.yml + include example/README.rst To test your package from source you can run ``python setup.py develop`` to register the package so that bootstrap-vz can find the entry-point of your @@ -143,4 +143,4 @@ using pip: .. code-block:: sh - pip install git+ssh://git@github.com/username/repo#egg=plugin_name + pip install git+ssh://git@github.com/username/repo#egg=plugin_name diff --git a/docs/developers/taskoverview.rst b/docs/developers/taskoverview.rst index 2e76e19..e1dc33d 100644 --- a/docs/developers/taskoverview.rst +++ b/docs/developers/taskoverview.rst @@ -4,12 +4,12 @@ Taskoverview .. 
raw:: html - - - - - - - + + + + + + + diff --git a/docs/taskoverview.py b/docs/taskoverview.py index fa6a29b..74f4b3c 100755 --- a/docs/taskoverview.py +++ b/docs/taskoverview.py @@ -6,77 +6,77 @@ sys.path.append(os.path.join(os.path.dirname(__file__), '..')) def generate_graph_data(): - import bootstrapvz.common.tasks - import bootstrapvz.providers - import bootstrapvz.plugins - from bootstrapvz.base.tasklist import get_all_tasks - tasks = get_all_tasks([bootstrapvz.common.tasks, bootstrapvz.providers, bootstrapvz.plugins]) + import bootstrapvz.common.tasks + import bootstrapvz.providers + import bootstrapvz.plugins + from bootstrapvz.base.tasklist import get_all_tasks + tasks = get_all_tasks([bootstrapvz.common.tasks, bootstrapvz.providers, bootstrapvz.plugins]) - def distinct(seq): - seen = set() - return [x for x in seq if x not in seen and not seen.add(x)] - modules = distinct([task.__module__ for task in tasks]) - task_links = [] - task_links.extend([{'source': task, - 'target': succ, - 'definer': task, - } - for task in tasks - for succ in task.successors]) - task_links.extend([{'source': pre, - 'target': task, - 'definer': task, - } - for task in tasks - for pre in task.predecessors]) + def distinct(seq): + seen = set() + return [x for x in seq if x not in seen and not seen.add(x)] + modules = distinct([task.__module__ for task in tasks]) + task_links = [] + task_links.extend([{'source': task, + 'target': succ, + 'definer': task, + } + for task in tasks + for succ in task.successors]) + task_links.extend([{'source': pre, + 'target': task, + 'definer': task, + } + for task in tasks + for pre in task.predecessors]) - def mk_phase(phase): - return {'name': phase.name, - 'description': phase.description, - } + def mk_phase(phase): + return {'name': phase.name, + 'description': phase.description, + } - def mk_module(module): - return {'name': module, - } + def mk_module(module): + return {'name': module, + } - from bootstrapvz.common import phases + from 
bootstrapvz.common import phases - def mk_node(task): - return {'name': task.__name__, - 'module': modules.index(task.__module__), - 'phase': (i for i, phase in enumerate(phases.order) if phase is task.phase).next(), - } + def mk_node(task): + return {'name': task.__name__, + 'module': modules.index(task.__module__), + 'phase': (i for i, phase in enumerate(phases.order) if phase is task.phase).next(), + } - def mk_link(link): - for key in ['source', 'target', 'definer']: - link[key] = tasks.index(link[key]) - return link + def mk_link(link): + for key in ['source', 'target', 'definer']: + link[key] = tasks.index(link[key]) + return link - return {'phases': map(mk_phase, phases.order), - 'modules': map(mk_module, modules), - 'nodes': map(mk_node, tasks), - 'links': map(mk_link, task_links)} + return {'phases': map(mk_phase, phases.order), + 'modules': map(mk_module, modules), + 'nodes': map(mk_node, tasks), + 'links': map(mk_link, task_links)} def write_data(data, output_path=None): - import json - if output_path is None: - import sys - json.dump(data, sys.stdout, indent=4, separators=(',', ': ')) - else: - with open(output_path, 'w') as output: - json.dump(data, output) + import json + if output_path is None: + import sys + json.dump(data, sys.stdout, indent=4, separators=(',', ': ')) + else: + with open(output_path, 'w') as output: + json.dump(data, output) if __name__ == '__main__' and __package__ is None: - from docopt import docopt - usage = """Usage: taskoverview.py [options] + from docopt import docopt + usage = """Usage: taskoverview.py [options] Options: --output output -h, --help show this help """ - opts = docopt(usage) + opts = docopt(usage) - data = generate_graph_data() - write_data(data, opts.get('--output', None)) + data = generate_graph_data() + write_data(data, opts.get('--output', None)) diff --git a/docs/transform_github_links.py b/docs/transform_github_links.py index 13536e2..9de8451 100644 --- a/docs/transform_github_links.py +++ 
b/docs/transform_github_links.py @@ -2,101 +2,101 @@ import re def setup(app): - app.connect('doctree-resolved', transform_github_links) - return {'version': '0.1'} + app.connect('doctree-resolved', transform_github_links) + return {'version': '0.1'} # Maps from files in docs/ to folders/files in repo includes_mapping = { - r'^index$': r'', - r'^(providers|plugins)/index$': r'bootstrapvz/\1/', - r'^(providers|plugins)/(?!index)([^/]+)$': r'bootstrapvz/\1/\2/', - r'^manifests/index$': r'manifest/', - r'^manifests/official_([^_]+)_manifests$': r'manifest/official/\1/', - r'^testing/index$': r'tests/', - r'^testing/(?!index)([^/]+)_tests$': r'tests/\1/', - r'^remote_bootstrapping$': r'bootstrapvz/remote/', - r'^developers/index$': r'bootstrapvz/', - r'^developers/contributing$': r'CONTRIBUTING.rst', - r'^developers/documentation$': r'docs/', - r'^changelog$': r'CHANGELOG.rst', + r'^index$': r'', + r'^(providers|plugins)/index$': r'bootstrapvz/\1/', + r'^(providers|plugins)/(?!index)([^/]+)$': r'bootstrapvz/\1/\2/', + r'^manifests/index$': r'manifest/', + r'^manifests/official_([^_]+)_manifests$': r'manifest/official/\1/', + r'^testing/index$': r'tests/', + r'^testing/(?!index)([^/]+)_tests$': r'tests/\1/', + r'^remote_bootstrapping$': r'bootstrapvz/remote/', + r'^developers/index$': r'bootstrapvz/', + r'^developers/contributing$': r'CONTRIBUTING.rst', + r'^developers/documentation$': r'docs/', + r'^changelog$': r'CHANGELOG.rst', } # Maps from links in repo to files/folders in docs/ links_mapping = { - r'^$': r'', - r'^bootstrapvz/(providers|plugins)$': r'\1', - r'^bootstrapvz/(providers|plugins)/([^/]+)$': r'\1/\2.html', - r'^tests$': r'testing', - r'^manifests$': r'manifests', - r'^manifests/official/([^/]+)$': r'manifests/official_\1_manifests.html', - r'^tests/([^/]+)$': r'testing/\1_tests.html', - r'^bootstrapvz/remote$': r'remote_bootstrapping.html', - r'^bootstrapvz$': r'developers', - r'^CONTRIBUTING\.rst$': r'developers/contributing.html', - r'^docs$': 
r'developers/documentation.html', - r'^CHANGELOG\.rst$': r'changelog.html', + r'^$': r'', + r'^bootstrapvz/(providers|plugins)$': r'\1', + r'^bootstrapvz/(providers|plugins)/([^/]+)$': r'\1/\2.html', + r'^tests$': r'testing', + r'^manifests$': r'manifests', + r'^manifests/official/([^/]+)$': r'manifests/official_\1_manifests.html', + r'^tests/([^/]+)$': r'testing/\1_tests.html', + r'^bootstrapvz/remote$': r'remote_bootstrapping.html', + r'^bootstrapvz$': r'developers', + r'^CONTRIBUTING\.rst$': r'developers/contributing.html', + r'^docs$': r'developers/documentation.html', + r'^CHANGELOG\.rst$': r'changelog.html', } for key, val in includes_mapping.items(): - del includes_mapping[key] - includes_mapping[re.compile(key)] = val + del includes_mapping[key] + includes_mapping[re.compile(key)] = val for key, val in links_mapping.items(): - del links_mapping[key] - links_mapping[re.compile(key)] = val + del links_mapping[key] + links_mapping[re.compile(key)] = val def find_original(path): - for key, val in includes_mapping.items(): - if re.match(key, path): - return re.sub(key, val, path) - return None + for key, val in includes_mapping.items(): + if re.match(key, path): + return re.sub(key, val, path) + return None def find_docs_link(link): - try: - # Preserve anchor when doing lookups - link, anchor = link.split('#', 1) - anchor = '#' + anchor - except ValueError: - # No anchor, keep the original link - anchor = '' - for key, val in links_mapping.items(): - if re.match(key, link): - return re.sub(key, val, link) + anchor - return None + try: + # Preserve anchor when doing lookups + link, anchor = link.split('#', 1) + anchor = '#' + anchor + except ValueError: + # No anchor, keep the original link + anchor = '' + for key, val in links_mapping.items(): + if re.match(key, link): + return re.sub(key, val, link) + anchor + return None def transform_github_links(app, doctree, fromdocname): - # Convert relative links in repo into relative links in docs. 
- # We do this by first figuring out whether the current document - # has been included from outside docs/ and only continue if so. - # Next we take the repo path matching the current document - # (lookup through 'includes_mapping'), tack the link onto the dirname - # of that path and normalize it using os.path.normpath. - # The result is the path to a document/folder in the repo. - # We then convert this path into one that works in the documentation - # (lookup through 'links_mapping'). - # If a mapping is found we, create a relative link from the current document. + # Convert relative links in repo into relative links in docs. + # We do this by first figuring out whether the current document + # has been included from outside docs/ and only continue if so. + # Next we take the repo path matching the current document + # (lookup through 'includes_mapping'), tack the link onto the dirname + # of that path and normalize it using os.path.normpath. + # The result is the path to a document/folder in the repo. + # We then convert this path into one that works in the documentation + # (lookup through 'links_mapping'). + # If a mapping is found we, create a relative link from the current document. 
- from docutils import nodes - import os.path - original_path = find_original(fromdocname) - if original_path is None: - return + from docutils import nodes + import os.path + original_path = find_original(fromdocname) + if original_path is None: + return - for node in doctree.traverse(nodes.reference): - if 'refuri' not in node: - continue - if node['refuri'].startswith('http'): - continue - abs_link = os.path.normpath(os.path.join(os.path.dirname(original_path), node['refuri'])) - docs_link = find_docs_link(abs_link) - if docs_link is None: - continue - # special handling for when we link inside the same document - if docs_link.startswith('#'): - node['refuri'] = docs_link - else: - node['refuri'] = os.path.relpath(docs_link, os.path.dirname(fromdocname)) + for node in doctree.traverse(nodes.reference): + if 'refuri' not in node: + continue + if node['refuri'].startswith('http'): + continue + abs_link = os.path.normpath(os.path.join(os.path.dirname(original_path), node['refuri'])) + docs_link = find_docs_link(abs_link) + if docs_link is None: + continue + # special handling for when we link inside the same document + if docs_link.startswith('#'): + node['refuri'] = docs_link + else: + node['refuri'] = os.path.relpath(docs_link, os.path.dirname(fromdocname)) diff --git a/setup.py b/setup.py index 3fdc632..bffe5bf 100644 --- a/setup.py +++ b/setup.py @@ -4,12 +4,12 @@ import os.path def find_version(path): - import re - version_file = open(path).read() - version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M) - if version_match: - return version_match.group(1) - raise RuntimeError("Unable to find version string.") + import re + version_file = open(path).read() + version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M) + if version_match: + return version_match.group(1) + raise RuntimeError("Unable to find version string.") setup(name='bootstrap-vz', version=find_version(os.path.join(os.path.dirname(__file__), 
'bootstrapvz/__init__.py')), diff --git a/tests/README.rst b/tests/README.rst index ee59591..a1ec2cf 100644 --- a/tests/README.rst +++ b/tests/README.rst @@ -11,7 +11,7 @@ To run one specific test suite simply append the module path to tox: .. code-block:: sh - $ tox -e unit tests.unit.releases_tests + $ tox -e unit tests.unit.releases_tests Specific tests can be selected by appending the function name with a colon to the modulepath -- to run more than one tests, simply attach more arguments. @@ -19,4 +19,4 @@ to the modulepath -- to run more than one tests, simply attach more arguments. .. code-block:: sh - $ tox -e unit tests.unit.releases_tests:test_lt tests.unit.releases_tests:test_eq + $ tox -e unit tests.unit.releases_tests:test_lt tests.unit.releases_tests:test_eq diff --git a/tests/__init__.py b/tests/__init__.py index a0df8e2..cd41a18 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -2,8 +2,8 @@ # Snatched from: http://stackoverflow.com/a/2186565/339505 def recursive_glob(path, pattern): - import fnmatch - import os - for path, dirnames, filenames in os.walk(path): - for filename in fnmatch.filter(filenames, pattern): - yield os.path.join(path, filename) + import fnmatch + import os + for path, dirnames, filenames in os.walk(path): + for filename in fnmatch.filter(filenames, pattern): + yield os.path.join(path, filename) diff --git a/tests/integration/dry_run_tests.py b/tests/integration/dry_run_tests.py index 14fbbf7..0c2f1c8 100644 --- a/tests/integration/dry_run_tests.py +++ b/tests/integration/dry_run_tests.py @@ -1,26 +1,26 @@ def test_manifest_generator(): - """ - manifests_tests - test_manifest_generator. + """ + manifests_tests - test_manifest_generator. - Loops through the manifests directory and tests that - each file can successfully be loaded and validated. - """ + Loops through the manifests directory and tests that + each file can successfully be loaded and validated. 
+ """ - from bootstrapvz.base.manifest import Manifest - from bootstrapvz.base.main import run + from bootstrapvz.base.manifest import Manifest + from bootstrapvz.base.main import run - def dry_run(path): - manifest = Manifest(path=path) - run(manifest, dry_run=True) + def dry_run(path): + manifest = Manifest(path=path) + run(manifest, dry_run=True) - import os.path - from .. import recursive_glob - from itertools import chain - manifests = os.path.join(os.path.dirname(os.path.realpath(__file__)), - '../../manifests') - manifest_paths = chain(recursive_glob(manifests, '*.yml'), recursive_glob(manifests, '*.json')) - for manifest_path in manifest_paths: - dry_run.description = "Dry-running %s" % os.path.relpath(manifest_path, manifests) - yield dry_run, manifest_path + import os.path + from .. import recursive_glob + from itertools import chain + manifests = os.path.join(os.path.dirname(os.path.realpath(__file__)), + '../../manifests') + manifest_paths = chain(recursive_glob(manifests, '*.yml'), recursive_glob(manifests, '*.json')) + for manifest_path in manifest_paths: + dry_run.description = "Dry-running %s" % os.path.relpath(manifest_path, manifests) + yield dry_run, manifest_path diff --git a/tests/system/README.rst b/tests/system/README.rst index d77c68a..b5bdbbe 100644 --- a/tests/system/README.rst +++ b/tests/system/README.rst @@ -48,14 +48,14 @@ This allows code like this: .. 
code-block:: python - partials = {'vdi': '{provider: {name: virtualbox}, volume: {backing: vdi}}', - 'vmdk': '{provider: {name: virtualbox}, volume: {backing: vmdk}}', - } + partials = {'vdi': '{provider: {name: virtualbox}, volume: {backing: vdi}}', + 'vmdk': '{provider: {name: virtualbox}, volume: {backing: vmdk}}', + } - def test_unpartitioned_extlinux_oldstable(): - std_partials = ['base', 'stable64', 'extlinux', 'unpartitioned', 'root_password'] - custom_partials = [partials['vmdk']] - manifest_data = merge_manifest_data(std_partials, custom_partials) + def test_unpartitioned_extlinux_oldstable(): + std_partials = ['base', 'stable64', 'extlinux', 'unpartitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) The code above produces a manifest for Debian stable 64-bit unpartitioned virtualbox VMDK image. diff --git a/tests/system/docker_tests.py b/tests/system/docker_tests.py index cb0771f..720f584 100644 --- a/tests/system/docker_tests.py +++ b/tests/system/docker_tests.py @@ -22,8 +22,8 @@ volume: def test_stable(): - std_partials = ['base', 'stable64'] - custom_partials = [partials['docker']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - with boot_manifest(manifest_data) as instance: - print('\n'.join(instance.run(['echo', 'test']))) + std_partials = ['base', 'stable64'] + custom_partials = [partials['docker']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print('\n'.join(instance.run(['echo', 'test']))) diff --git a/tests/system/ec2_ebs_hvm_tests.py b/tests/system/ec2_ebs_hvm_tests.py index eab0c96..a517d65 100644 --- a/tests/system/ec2_ebs_hvm_tests.py +++ b/tests/system/ec2_ebs_hvm_tests.py @@ -14,117 +14,117 @@ volume: {backing: ebs} def test_unpartitioned_extlinux_oldstable(): - std_partials = ['base', 'oldstable64', 'unpartitioned', 'root_password'] - custom_partials = 
[partials['ebs_hvm'], partials['extlinux']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't2.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'oldstable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_msdos_extlinux_oldstable(): - std_partials = ['base', 'oldstable64', 'msdos', 'single_partition', 'root_password'] - custom_partials = [partials['ebs_hvm'], partials['extlinux']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't2.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'oldstable64', 'msdos', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_gpt_extlinux_oldstable(): - std_partials = ['base', 'oldstable64', 'gpt', 'single_partition', 'root_password'] - custom_partials = [partials['ebs_hvm'], partials['extlinux']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't2.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'oldstable64', 'gpt', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = 
merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_unpartitioned_extlinux_stable(): - std_partials = ['base', 'stable64', 'unpartitioned', 'root_password'] - custom_partials = [partials['ebs_hvm'], partials['extlinux']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't2.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'stable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_msdos_extlinux_stable(): - std_partials = ['base', 'stable64', 'msdos', 'single_partition', 'root_password'] - custom_partials = [partials['ebs_hvm'], partials['extlinux']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't2.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'stable64', 'msdos', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_gpt_extlinux_stable(): - std_partials = ['base', 'stable64', 'gpt', 'single_partition', 'root_password'] - custom_partials = [partials['ebs_hvm'], partials['extlinux']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 
't2.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'stable64', 'gpt', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_msdos_grub_stable(): - std_partials = ['base', 'stable64', 'msdos', 'single_partition', 'root_password'] - custom_partials = [partials['ebs_hvm'], partials['grub']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't2.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'stable64', 'msdos', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['grub']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_gpt_grub_stable(): - std_partials = ['base', 'stable64', 'gpt', 'single_partition', 'root_password'] - custom_partials = [partials['ebs_hvm'], partials['grub']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't2.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'stable64', 'gpt', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['grub']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def 
test_unpartitioned_extlinux_unstable(): - std_partials = ['base', 'unstable64', 'unpartitioned', 'root_password'] - custom_partials = [partials['ebs_hvm'], partials['extlinux']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't2.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'unstable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_msdos_extlinux_unstable(): - std_partials = ['base', 'unstable64', 'msdos', 'single_partition', 'root_password'] - custom_partials = [partials['ebs_hvm'], partials['extlinux']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't2.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'unstable64', 'msdos', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_gpt_extlinux_unstable(): - std_partials = ['base', 'unstable64', 'gpt', 'single_partition', 'root_password'] - custom_partials = [partials['ebs_hvm'], partials['extlinux']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't2.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'unstable64', 'gpt', 'single_partition', 
'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_msdos_grub_unstable(): - std_partials = ['base', 'unstable64', 'msdos', 'single_partition', 'root_password'] - custom_partials = [partials['ebs_hvm'], partials['grub']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't2.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'unstable64', 'msdos', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['grub']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_gpt_grub_unstable(): - std_partials = ['base', 'unstable64', 'gpt', 'single_partition', 'root_password'] - custom_partials = [partials['ebs_hvm'], partials['grub']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't2.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'unstable64', 'gpt', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['grub']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) diff --git a/tests/system/ec2_ebs_pvm_tests.py b/tests/system/ec2_ebs_pvm_tests.py index 3b54de1..cb006f5 100644 --- a/tests/system/ec2_ebs_pvm_tests.py +++ 
b/tests/system/ec2_ebs_pvm_tests.py @@ -13,81 +13,81 @@ volume: {backing: ebs} def test_unpartitioned_oldstable(): - std_partials = ['base', 'oldstable64', 'unpartitioned', 'root_password'] - custom_partials = [partials['ebs_pvm']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't1.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'oldstable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_msdos_oldstable(): - std_partials = ['base', 'oldstable64', 'msdos', 'single_partition', 'root_password'] - custom_partials = [partials['ebs_pvm']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't1.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'oldstable64', 'msdos', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_gpt_oldstable(): - std_partials = ['base', 'oldstable64', 'gpt', 'single_partition', 'root_password'] - custom_partials = [partials['ebs_pvm']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't1.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'oldstable64', 'gpt', 'single_partition', 'root_password'] + custom_partials = 
[partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_unpartitioned_stable(): - std_partials = ['base', 'stable64', 'unpartitioned', 'root_password'] - custom_partials = [partials['ebs_pvm']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't1.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'stable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_msdos_stable(): - std_partials = ['base', 'stable64', 'msdos', 'single_partition', 'root_password'] - custom_partials = [partials['ebs_pvm']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't1.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'stable64', 'msdos', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_gpt_stable(): - std_partials = ['base', 'stable64', 'gpt', 'single_partition', 'root_password'] - custom_partials = [partials['ebs_pvm']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't1.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - 
print(instance.get_console_output().output) + std_partials = ['base', 'stable64', 'gpt', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_unpartitioned_unstable(): - std_partials = ['base', 'unstable64', 'unpartitioned', 'root_password'] - custom_partials = [partials['ebs_pvm']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't1.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'unstable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_msdos_unstable(): - std_partials = ['base', 'unstable64', 'msdos', 'single_partition', 'root_password'] - custom_partials = [partials['ebs_pvm']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't1.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'unstable64', 'msdos', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_gpt_unstable(): - std_partials = ['base', 'unstable64', 'gpt', 'single_partition', 'root_password'] - custom_partials = [partials['ebs_pvm']] - manifest_data = 
merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 't1.micro'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'unstable64', 'gpt', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) diff --git a/tests/system/ec2_s3_pvm_tests.py b/tests/system/ec2_s3_pvm_tests.py index 4a17c20..51c0a2d 100644 --- a/tests/system/ec2_s3_pvm_tests.py +++ b/tests/system/ec2_s3_pvm_tests.py @@ -16,27 +16,27 @@ volume: {backing: s3} def test_unpartitioned_oldstable(): - std_partials = ['base', 'oldstable64', 'unpartitioned', 'root_password'] - custom_partials = [partials['s3_pvm']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 'm1.small'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'oldstable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['s3_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 'm1.small'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_unpartitioned_stable(): - std_partials = ['base', 'stable64', 'unpartitioned', 'root_password'] - custom_partials = [partials['s3_pvm']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 'm1.small'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'stable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['s3_pvm']] + manifest_data = 
merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 'm1.small'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) def test_unpartitioned_unstable(): - std_partials = ['base', 'unstable64', 'unpartitioned', 'root_password'] - custom_partials = [partials['s3_pvm']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - boot_vars = {'instance_type': 'm1.small'} - with boot_manifest(manifest_data, boot_vars) as instance: - print(instance.get_console_output().output) + std_partials = ['base', 'unstable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['s3_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 'm1.small'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) diff --git a/tests/system/manifests/__init__.py b/tests/system/manifests/__init__.py index 7bdeb78..26b205d 100644 --- a/tests/system/manifests/__init__.py +++ b/tests/system/manifests/__init__.py @@ -9,11 +9,11 @@ partial_yaml = glob.glob(os.path.join(os.path.dirname(__file__), '*.json')) partials = {} for path in partial_json + partial_yaml: - key = os.path.splitext(os.path.basename(path))[0] - if key in partials: - msg = 'Error when loading partial manifests: The partial {key} exists twice'.format(key=key) - raise Exception(msg) - partials[key] = load_data(path) + key = os.path.splitext(os.path.basename(path))[0] + if key in partials: + msg = 'Error when loading partial manifests: The partial {key} exists twice'.format(key=key) + raise Exception(msg) + partials[key] = load_data(path) pool = string.ascii_uppercase + string.ascii_lowercase + string.digits random_password = ''.join(random.choice(pool) for _ in range(16)) @@ -21,34 +21,34 @@ partials['root_password']['plugins']['root_password']['password'] = random_passw def merge_manifest_data(standard_partials=[], custom=[]): - 
import yaml - manifest_data = [partials[name] for name in standard_partials] - manifest_data.extend(yaml.load(data) for data in custom) - return merge_dicts(*manifest_data) + import yaml + manifest_data = [partials[name] for name in standard_partials] + manifest_data.extend(yaml.load(data) for data in custom) + return merge_dicts(*manifest_data) # Snatched from here: http://stackoverflow.com/a/7205107 def merge_dicts(*args): - def clone(obj): - copy = obj - if isinstance(obj, dict): - copy = {key: clone(value) for key, value in obj.iteritems()} - if isinstance(obj, list): - copy = [clone(value) for value in obj] - if isinstance(obj, set): - copy = set([clone(value) for value in obj]) - return copy + def clone(obj): + copy = obj + if isinstance(obj, dict): + copy = {key: clone(value) for key, value in obj.iteritems()} + if isinstance(obj, list): + copy = [clone(value) for value in obj] + if isinstance(obj, set): + copy = set([clone(value) for value in obj]) + return copy - def merge(a, b, path=[]): - for key in b: - if key in a: - if isinstance(a[key], dict) and isinstance(b[key], dict): - merge(a[key], b[key], path + [str(key)]) - elif a[key] == b[key]: - pass - else: - raise Exception('Conflict at `{path}\''.format(path='.'.join(path + [str(key)]))) - else: - a[key] = clone(b[key]) - return a - return reduce(merge, args, {}) + def merge(a, b, path=[]): + for key in b: + if key in a: + if isinstance(a[key], dict) and isinstance(b[key], dict): + merge(a[key], b[key], path + [str(key)]) + elif a[key] == b[key]: + pass + else: + raise Exception('Conflict at `{path}\''.format(path='.'.join(path + [str(key)]))) + else: + a[key] = clone(b[key]) + return a + return reduce(merge, args, {}) diff --git a/tests/system/providers/docker/__init__.py b/tests/system/providers/docker/__init__.py index 85ad41b..9076039 100644 --- a/tests/system/providers/docker/__init__.py +++ b/tests/system/providers/docker/__init__.py @@ -5,53 +5,53 @@ log = logging.getLogger(__name__) 
@contextmanager def boot_image(manifest, build_server, bootstrap_info): - image_id = None - try: - import os - from bootstrapvz.common.tools import log_check_call - docker_machine = build_server.run_settings.get('docker', {}).get('machine', None) - docker_env = os.environ.copy() - if docker_machine is not None: - cmd = ('eval "$(docker-machine env {machine})" && ' - 'echo $DOCKER_HOST && echo $DOCKER_CERT_PATH && echo $DOCKER_TLS_VERIFY' - .format(machine=docker_machine)) - [docker_host, docker_cert_path, docker_tls] = log_check_call([cmd], shell=True) - docker_env['DOCKER_TLS_VERIFY'] = docker_tls - docker_env['DOCKER_HOST'] = docker_host - docker_env['DOCKER_CERT_PATH'] = docker_cert_path - docker_env['DOCKER_MACHINE_NAME'] = docker_machine - from bootstrapvz.remote.build_servers.local import LocalBuildServer - image_id = bootstrap_info._docker['image_id'] - if not isinstance(build_server, LocalBuildServer): - import tempfile - handle, image_path = tempfile.mkstemp() - os.close(handle) - remote_image_path = os.path.join('/tmp', image_id) - try: - log.debug('Saving remote image to file') - build_server.remote_command([ - 'sudo', 'docker', 'save', - '--output=' + remote_image_path, - image_id, - ]) - log.debug('Downloading remote image') - build_server.download(remote_image_path, image_path) - log.debug('Importing image') - log_check_call(['docker', 'load', '--input=' + image_path], env=docker_env) - except (Exception, KeyboardInterrupt): - raise - finally: - log.debug('Deleting exported image from build server and locally') - build_server.delete(remote_image_path) - os.remove(image_path) - log.debug('Deleting image from build server') - build_server.remote_command(['sudo', 'docker', 'rmi', - bootstrap_info._docker['image_id']]) + image_id = None + try: + import os + from bootstrapvz.common.tools import log_check_call + docker_machine = build_server.run_settings.get('docker', {}).get('machine', None) + docker_env = os.environ.copy() + if docker_machine is not None: 
+ cmd = ('eval "$(docker-machine env {machine})" && ' + 'echo $DOCKER_HOST && echo $DOCKER_CERT_PATH && echo $DOCKER_TLS_VERIFY' + .format(machine=docker_machine)) + [docker_host, docker_cert_path, docker_tls] = log_check_call([cmd], shell=True) + docker_env['DOCKER_TLS_VERIFY'] = docker_tls + docker_env['DOCKER_HOST'] = docker_host + docker_env['DOCKER_CERT_PATH'] = docker_cert_path + docker_env['DOCKER_MACHINE_NAME'] = docker_machine + from bootstrapvz.remote.build_servers.local import LocalBuildServer + image_id = bootstrap_info._docker['image_id'] + if not isinstance(build_server, LocalBuildServer): + import tempfile + handle, image_path = tempfile.mkstemp() + os.close(handle) + remote_image_path = os.path.join('/tmp', image_id) + try: + log.debug('Saving remote image to file') + build_server.remote_command([ + 'sudo', 'docker', 'save', + '--output=' + remote_image_path, + image_id, + ]) + log.debug('Downloading remote image') + build_server.download(remote_image_path, image_path) + log.debug('Importing image') + log_check_call(['docker', 'load', '--input=' + image_path], env=docker_env) + except (Exception, KeyboardInterrupt): + raise + finally: + log.debug('Deleting exported image from build server and locally') + build_server.delete(remote_image_path) + os.remove(image_path) + log.debug('Deleting image from build server') + build_server.remote_command(['sudo', 'docker', 'rmi', + bootstrap_info._docker['image_id']]) - from image import Image - with Image(image_id, docker_env) as container: - yield container - finally: - if image_id is not None: - log.debug('Deleting image') - log_check_call(['docker', 'rmi', image_id], env=docker_env) + from image import Image + with Image(image_id, docker_env) as container: + yield container + finally: + if image_id is not None: + log.debug('Deleting image') + log_check_call(['docker', 'rmi', image_id], env=docker_env) diff --git a/tests/system/providers/docker/image.py b/tests/system/providers/docker/image.py index 
12794f2..9ef29d1 100644 --- a/tests/system/providers/docker/image.py +++ b/tests/system/providers/docker/image.py @@ -5,51 +5,51 @@ log = logging.getLogger(__name__) class Image(object): - def __init__(self, image_id, docker_env): - self.image_id = image_id - self.docker_env = docker_env + def __init__(self, image_id, docker_env): + self.image_id = image_id + self.docker_env = docker_env - def __enter__(self): - self.container = Container(self.image_id, self.docker_env) - self.container.create() - try: - self.container.start() - except: - self.container.destroy() - raise - return self.container + def __enter__(self): + self.container = Container(self.image_id, self.docker_env) + self.container.create() + try: + self.container.start() + except: + self.container.destroy() + raise + return self.container - def __exit__(self, exc_type, exc_value, traceback): - try: - self.container.stop() - self.container.destroy() - except Exception as e: - log.exception(e) + def __exit__(self, exc_type, exc_value, traceback): + try: + self.container.stop() + self.container.destroy() + except Exception as e: + log.exception(e) class Container(object): - def __init__(self, image_id, docker_env): - self.image_id = image_id - self.docker_env = docker_env + def __init__(self, image_id, docker_env): + self.image_id = image_id + self.docker_env = docker_env - def create(self): - log.debug('Creating container') - [self.container_id] = log_check_call(['docker', 'create', '--tty=true', self.image_id], env=self.docker_env) + def create(self): + log.debug('Creating container') + [self.container_id] = log_check_call(['docker', 'create', '--tty=true', self.image_id], env=self.docker_env) - def start(self): - log.debug('Starting container') - log_check_call(['docker', 'start', self.container_id], env=self.docker_env) + def start(self): + log.debug('Starting container') + log_check_call(['docker', 'start', self.container_id], env=self.docker_env) - def run(self, command): - log.debug('Running 
command in container') - return log_check_call(['docker', 'exec', self.container_id] + command, env=self.docker_env) + def run(self, command): + log.debug('Running command in container') + return log_check_call(['docker', 'exec', self.container_id] + command, env=self.docker_env) - def stop(self): - log.debug('Stopping container') - log_check_call(['docker', 'stop', self.container_id], env=self.docker_env) + def stop(self): + log.debug('Stopping container') + log_check_call(['docker', 'stop', self.container_id], env=self.docker_env) - def destroy(self): - log.debug('Deleting container') - log_check_call(['docker', 'rm', self.container_id], env=self.docker_env) - del self.container_id + def destroy(self): + log.debug('Deleting container') + log_check_call(['docker', 'rm', self.container_id], env=self.docker_env) + del self.container_id diff --git a/tests/system/providers/ec2/__init__.py b/tests/system/providers/ec2/__init__.py index b1288e5..09c095a 100644 --- a/tests/system/providers/ec2/__init__.py +++ b/tests/system/providers/ec2/__init__.py @@ -6,134 +6,134 @@ log = logging.getLogger(__name__) @contextmanager def prepare_bootstrap(manifest, build_server): - if manifest.volume['backing'] == 's3': - credentials = {'access-key': build_server.build_settings['ec2-credentials']['access-key'], - 'secret-key': build_server.build_settings['ec2-credentials']['secret-key']} - from boto.s3 import connect_to_region as s3_connect - s3_connection = s3_connect(manifest.image['region'], - aws_access_key_id=credentials['access-key'], - aws_secret_access_key=credentials['secret-key']) - log.debug('Creating S3 bucket') - bucket = s3_connection.create_bucket(manifest.image['bucket'], location=manifest.image['region']) - try: - yield - finally: - log.debug('Deleting S3 bucket') - for item in bucket.list(): - bucket.delete_key(item.key) - s3_connection.delete_bucket(manifest.image['bucket']) - else: - yield + if manifest.volume['backing'] == 's3': + credentials = {'access-key': 
build_server.build_settings['ec2-credentials']['access-key'], + 'secret-key': build_server.build_settings['ec2-credentials']['secret-key']} + from boto.s3 import connect_to_region as s3_connect + s3_connection = s3_connect(manifest.image['region'], + aws_access_key_id=credentials['access-key'], + aws_secret_access_key=credentials['secret-key']) + log.debug('Creating S3 bucket') + bucket = s3_connection.create_bucket(manifest.image['bucket'], location=manifest.image['region']) + try: + yield + finally: + log.debug('Deleting S3 bucket') + for item in bucket.list(): + bucket.delete_key(item.key) + s3_connection.delete_bucket(manifest.image['bucket']) + else: + yield @contextmanager def boot_image(manifest, build_server, bootstrap_info, instance_type=None): - credentials = {'access-key': build_server.run_settings['ec2-credentials']['access-key'], - 'secret-key': build_server.run_settings['ec2-credentials']['secret-key']} - from boto.ec2 import connect_to_region as ec2_connect - ec2_connection = ec2_connect(bootstrap_info._ec2['region'], - aws_access_key_id=credentials['access-key'], - aws_secret_access_key=credentials['secret-key']) - from boto.vpc import connect_to_region as vpc_connect - vpc_connection = vpc_connect(bootstrap_info._ec2['region'], - aws_access_key_id=credentials['access-key'], - aws_secret_access_key=credentials['secret-key']) + credentials = {'access-key': build_server.run_settings['ec2-credentials']['access-key'], + 'secret-key': build_server.run_settings['ec2-credentials']['secret-key']} + from boto.ec2 import connect_to_region as ec2_connect + ec2_connection = ec2_connect(bootstrap_info._ec2['region'], + aws_access_key_id=credentials['access-key'], + aws_secret_access_key=credentials['secret-key']) + from boto.vpc import connect_to_region as vpc_connect + vpc_connection = vpc_connect(bootstrap_info._ec2['region'], + aws_access_key_id=credentials['access-key'], + aws_secret_access_key=credentials['secret-key']) - if manifest.volume['backing'] == 
'ebs': - from images import EBSImage - image = EBSImage(bootstrap_info._ec2['image'], ec2_connection) - if manifest.volume['backing'] == 's3': - from images import S3Image - image = S3Image(bootstrap_info._ec2['image'], ec2_connection) + if manifest.volume['backing'] == 'ebs': + from images import EBSImage + image = EBSImage(bootstrap_info._ec2['image'], ec2_connection) + if manifest.volume['backing'] == 's3': + from images import S3Image + image = S3Image(bootstrap_info._ec2['image'], ec2_connection) - try: - with run_instance(image, manifest, instance_type, ec2_connection, vpc_connection) as instance: - yield instance - finally: - image.destroy() + try: + with run_instance(image, manifest, instance_type, ec2_connection, vpc_connection) as instance: + yield instance + finally: + image.destroy() @contextmanager def run_instance(image, manifest, instance_type, ec2_connection, vpc_connection): - with create_env(ec2_connection, vpc_connection) as boot_env: + with create_env(ec2_connection, vpc_connection) as boot_env: - def waituntil_instance_is(state): - def instance_has_state(): - instance.update() - return instance.state == state - return waituntil(instance_has_state, timeout=600, interval=3) + def waituntil_instance_is(state): + def instance_has_state(): + instance.update() + return instance.state == state + return waituntil(instance_has_state, timeout=600, interval=3) - instance = None - try: - log.debug('Booting ec2 instance') - reservation = image.ami.run(instance_type=instance_type, - subnet_id=boot_env['subnet_id']) - [instance] = reservation.instances - instance.add_tag('Name', 'bootstrap-vz test instance') + instance = None + try: + log.debug('Booting ec2 instance') + reservation = image.ami.run(instance_type=instance_type, + subnet_id=boot_env['subnet_id']) + [instance] = reservation.instances + instance.add_tag('Name', 'bootstrap-vz test instance') - if not waituntil_instance_is('running'): - raise EC2InstanceStartupException('Timeout while booting 
instance') + if not waituntil_instance_is('running'): + raise EC2InstanceStartupException('Timeout while booting instance') - if not waituntil(lambda: instance.get_console_output().output is not None, timeout=600, interval=3): - raise EC2InstanceStartupException('Timeout while fetching console output') + if not waituntil(lambda: instance.get_console_output().output is not None, timeout=600, interval=3): + raise EC2InstanceStartupException('Timeout while fetching console output') - from bootstrapvz.common.releases import wheezy - if manifest.release <= wheezy: - termination_string = 'INIT: Entering runlevel: 2' - else: - termination_string = 'Debian GNU/Linux' + from bootstrapvz.common.releases import wheezy + if manifest.release <= wheezy: + termination_string = 'INIT: Entering runlevel: 2' + else: + termination_string = 'Debian GNU/Linux' - console_output = instance.get_console_output().output - if termination_string not in console_output: - last_lines = '\n'.join(console_output.split('\n')[-50:]) - message = ('The instance did not boot properly.\n' - 'Last 50 lines of console output:\n{output}'.format(output=last_lines)) - raise EC2InstanceStartupException(message) + console_output = instance.get_console_output().output + if termination_string not in console_output: + last_lines = '\n'.join(console_output.split('\n')[-50:]) + message = ('The instance did not boot properly.\n' + 'Last 50 lines of console output:\n{output}'.format(output=last_lines)) + raise EC2InstanceStartupException(message) - yield instance - finally: - if instance is not None: - log.debug('Terminating ec2 instance') - instance.terminate() - if not waituntil_instance_is('terminated'): - raise EC2InstanceStartupException('Timeout while terminating instance') - # wait a little longer, aws can be a little slow sometimes and think the instance is still running - import time - time.sleep(15) + yield instance + finally: + if instance is not None: + log.debug('Terminating ec2 instance') + 
instance.terminate() + if not waituntil_instance_is('terminated'): + raise EC2InstanceStartupException('Timeout while terminating instance') + # wait a little longer, aws can be a little slow sometimes and think the instance is still running + import time + time.sleep(15) @contextmanager def create_env(ec2_connection, vpc_connection): - vpc_cidr = '10.0.0.0/28' - subnet_cidr = '10.0.0.0/28' + vpc_cidr = '10.0.0.0/28' + subnet_cidr = '10.0.0.0/28' - @contextmanager - def vpc(): - log.debug('Creating VPC') - vpc = vpc_connection.create_vpc(vpc_cidr) - try: - yield vpc - finally: - log.debug('Deleting VPC') - vpc_connection.delete_vpc(vpc.id) + @contextmanager + def vpc(): + log.debug('Creating VPC') + vpc = vpc_connection.create_vpc(vpc_cidr) + try: + yield vpc + finally: + log.debug('Deleting VPC') + vpc_connection.delete_vpc(vpc.id) - @contextmanager - def subnet(vpc): - log.debug('Creating subnet') - subnet = vpc_connection.create_subnet(vpc.id, subnet_cidr) - try: - yield subnet - finally: - log.debug('Deleting subnet') - vpc_connection.delete_subnet(subnet.id) + @contextmanager + def subnet(vpc): + log.debug('Creating subnet') + subnet = vpc_connection.create_subnet(vpc.id, subnet_cidr) + try: + yield subnet + finally: + log.debug('Deleting subnet') + vpc_connection.delete_subnet(subnet.id) - with vpc() as _vpc: - with subnet(_vpc) as _subnet: - yield {'subnet_id': _subnet.id} + with vpc() as _vpc: + with subnet(_vpc) as _subnet: + yield {'subnet_id': _subnet.id} class EC2InstanceStartupException(Exception): - pass + pass diff --git a/tests/system/providers/ec2/images.py b/tests/system/providers/ec2/images.py index 6375f8a..442b4b6 100644 --- a/tests/system/providers/ec2/images.py +++ b/tests/system/providers/ec2/images.py @@ -4,24 +4,24 @@ log = logging.getLogger(__name__) class AmazonMachineImage(object): - def __init__(self, image_id, ec2_connection): - self.ec2_connection = ec2_connection - self.ami = self.ec2_connection.get_image(image_id) + def 
__init__(self, image_id, ec2_connection): + self.ec2_connection = ec2_connection + self.ami = self.ec2_connection.get_image(image_id) class EBSImage(AmazonMachineImage): - def destroy(self): - log.debug('Deleting AMI') - self.ami.deregister() - for device, block_device_type in self.ami.block_device_mapping.items(): - self.ec2_connection.delete_snapshot(block_device_type.snapshot_id) - del self.ami + def destroy(self): + log.debug('Deleting AMI') + self.ami.deregister() + for device, block_device_type in self.ami.block_device_mapping.items(): + self.ec2_connection.delete_snapshot(block_device_type.snapshot_id) + del self.ami class S3Image(AmazonMachineImage): - def destroy(self): - log.debug('Deleting AMI') - self.ami.deregister() - del self.ami + def destroy(self): + log.debug('Deleting AMI') + self.ami.deregister() + del self.ami diff --git a/tests/system/providers/virtualbox/__init__.py b/tests/system/providers/virtualbox/__init__.py index f9460da..9dc1bd4 100644 --- a/tests/system/providers/virtualbox/__init__.py +++ b/tests/system/providers/virtualbox/__init__.py @@ -5,51 +5,51 @@ log = logging.getLogger(__name__) @contextmanager def boot_image(manifest, build_server, bootstrap_info): - from bootstrapvz.remote.build_servers.local import LocalBuildServer - if isinstance(build_server, LocalBuildServer): - image_path = bootstrap_info.volume.image_path - else: - import tempfile - handle, image_path = tempfile.mkstemp() - import os - os.close(handle) - try: - build_server.download(bootstrap_info.volume.image_path, image_path) - except (Exception, KeyboardInterrupt): - os.remove(image_path) - raise - finally: - build_server.delete(bootstrap_info.volume.image_path) + from bootstrapvz.remote.build_servers.local import LocalBuildServer + if isinstance(build_server, LocalBuildServer): + image_path = bootstrap_info.volume.image_path + else: + import tempfile + handle, image_path = tempfile.mkstemp() + import os + os.close(handle) + try: + 
build_server.download(bootstrap_info.volume.image_path, image_path) + except (Exception, KeyboardInterrupt): + os.remove(image_path) + raise + finally: + build_server.delete(bootstrap_info.volume.image_path) - from image import VirtualBoxImage - image = VirtualBoxImage(image_path) + from image import VirtualBoxImage + image = VirtualBoxImage(image_path) - import hashlib - image_hash = hashlib.sha1(image_path).hexdigest() - instance_name = 'bootstrap-vz-{hash}'.format(hash=image_hash[:8]) + import hashlib + image_hash = hashlib.sha1(image_path).hexdigest() + instance_name = 'bootstrap-vz-{hash}'.format(hash=image_hash[:8]) - try: - image.open() - try: - with run_instance(image, instance_name, manifest) as instance: - yield instance - finally: - image.close() - finally: - image.destroy() + try: + image.open() + try: + with run_instance(image, instance_name, manifest) as instance: + yield instance + finally: + image.close() + finally: + image.destroy() @contextmanager def run_instance(image, instance_name, manifest): - from instance import VirtualBoxInstance - instance = VirtualBoxInstance(image, instance_name, - manifest.system['architecture'], manifest.release) - try: - instance.create() - try: - instance.boot() - yield instance - finally: - instance.shutdown() - finally: - instance.destroy() + from instance import VirtualBoxInstance + instance = VirtualBoxInstance(image, instance_name, + manifest.system['architecture'], manifest.release) + try: + instance.create() + try: + instance.boot() + yield instance + finally: + instance.shutdown() + finally: + instance.destroy() diff --git a/tests/system/providers/virtualbox/image.py b/tests/system/providers/virtualbox/image.py index 7d51eb8..d8d137a 100644 --- a/tests/system/providers/virtualbox/image.py +++ b/tests/system/providers/virtualbox/image.py @@ -5,23 +5,23 @@ log = logging.getLogger(__name__) class VirtualBoxImage(object): - def __init__(self, image_path): - self.image_path = image_path - self.vbox = 
virtualbox.VirtualBox() + def __init__(self, image_path): + self.image_path = image_path + self.vbox = virtualbox.VirtualBox() - def open(self): - log.debug('Opening vbox medium `{path}\''.format(path=self.image_path)) - self.medium = self.vbox.open_medium(self.image_path, # location - virtualbox.library.DeviceType.hard_disk, # device_type - virtualbox.library.AccessMode.read_only, # access_mode - False) # force_new_uuid + def open(self): + log.debug('Opening vbox medium `{path}\''.format(path=self.image_path)) + self.medium = self.vbox.open_medium(self.image_path, # location + virtualbox.library.DeviceType.hard_disk, # device_type + virtualbox.library.AccessMode.read_only, # access_mode + False) # force_new_uuid - def close(self): - log.debug('Closing vbox medium `{path}\''.format(path=self.image_path)) - self.medium.close() + def close(self): + log.debug('Closing vbox medium `{path}\''.format(path=self.image_path)) + self.medium.close() - def destroy(self): - log.debug('Deleting vbox image `{path}\''.format(path=self.image_path)) - import os - os.remove(self.image_path) - del self.image_path + def destroy(self): + log.debug('Deleting vbox image `{path}\''.format(path=self.image_path)) + import os + os.remove(self.image_path) + del self.image_path diff --git a/tests/system/providers/virtualbox/instance.py b/tests/system/providers/virtualbox/instance.py index d8d84e2..dab0925 100644 --- a/tests/system/providers/virtualbox/instance.py +++ b/tests/system/providers/virtualbox/instance.py @@ -7,115 +7,115 @@ log = logging.getLogger(__name__) class VirtualBoxInstance(object): - cpus = 1 - memory = 256 + cpus = 1 + memory = 256 - def __init__(self, image, name, arch, release): - self.image = image - self.name = name - self.arch = arch - self.release = release - self.vbox = virtualbox.VirtualBox() - manager = virtualbox.Manager() - self.session = manager.get_session() + def __init__(self, image, name, arch, release): + self.image = image + self.name = name + self.arch = 
arch + self.release = release + self.vbox = virtualbox.VirtualBox() + manager = virtualbox.Manager() + self.session = manager.get_session() - def create(self): - log.debug('Creating vbox machine `{name}\''.format(name=self.name)) - # create machine - os_type = {'x86': 'Debian', - 'amd64': 'Debian_64'}.get(self.arch) - self.machine = self.vbox.create_machine(settings_file='', name=self.name, - groups=[], os_type_id=os_type, flags='') - self.machine.cpu_count = self.cpus - self.machine.memory_size = self.memory - self.machine.save_settings() # save settings, so that we can register it - self.vbox.register_machine(self.machine) + def create(self): + log.debug('Creating vbox machine `{name}\''.format(name=self.name)) + # create machine + os_type = {'x86': 'Debian', + 'amd64': 'Debian_64'}.get(self.arch) + self.machine = self.vbox.create_machine(settings_file='', name=self.name, + groups=[], os_type_id=os_type, flags='') + self.machine.cpu_count = self.cpus + self.machine.memory_size = self.memory + self.machine.save_settings() # save settings, so that we can register it + self.vbox.register_machine(self.machine) - # attach image - log.debug('Attaching SATA storage controller to vbox machine `{name}\''.format(name=self.name)) - with lock(self.machine, self.session) as machine: - strg_ctrl = machine.add_storage_controller('SATA Controller', - virtualbox.library.StorageBus.sata) - strg_ctrl.port_count = 1 - machine.attach_device(name='SATA Controller', controller_port=0, device=0, - type_p=virtualbox.library.DeviceType.hard_disk, - medium=self.image.medium) - machine.save_settings() + # attach image + log.debug('Attaching SATA storage controller to vbox machine `{name}\''.format(name=self.name)) + with lock(self.machine, self.session) as machine: + strg_ctrl = machine.add_storage_controller('SATA Controller', + virtualbox.library.StorageBus.sata) + strg_ctrl.port_count = 1 + machine.attach_device(name='SATA Controller', controller_port=0, device=0, + 
type_p=virtualbox.library.DeviceType.hard_disk, + medium=self.image.medium) + machine.save_settings() - # redirect serial port - log.debug('Enabling serial port on vbox machine `{name}\''.format(name=self.name)) - with lock(self.machine, self.session) as machine: - serial_port = machine.get_serial_port(0) - serial_port.enabled = True - import tempfile - handle, self.serial_port_path = tempfile.mkstemp() - import os - os.close(handle) - serial_port.path = self.serial_port_path - serial_port.host_mode = virtualbox.library.PortMode.host_pipe - serial_port.server = True # Create the socket on startup - machine.save_settings() + # redirect serial port + log.debug('Enabling serial port on vbox machine `{name}\''.format(name=self.name)) + with lock(self.machine, self.session) as machine: + serial_port = machine.get_serial_port(0) + serial_port.enabled = True + import tempfile + handle, self.serial_port_path = tempfile.mkstemp() + import os + os.close(handle) + serial_port.path = self.serial_port_path + serial_port.host_mode = virtualbox.library.PortMode.host_pipe + serial_port.server = True # Create the socket on startup + machine.save_settings() - def boot(self): - log.debug('Booting vbox machine `{name}\''.format(name=self.name)) - self.machine.launch_vm_process(self.session, 'headless').wait_for_completion(-1) - from tests.system.tools import read_from_socket - # Gotta figure out a more reliable way to check when the system is done booting. - # Maybe bootstrapped unit test images should have a startup script that issues - # a callback to the host. 
- from bootstrapvz.common.releases import wheezy - if self.release <= wheezy: - termination_string = 'INIT: Entering runlevel: 2' - else: - termination_string = 'Debian GNU/Linux' - self.console_output = read_from_socket(self.serial_port_path, termination_string, 120) + def boot(self): + log.debug('Booting vbox machine `{name}\''.format(name=self.name)) + self.machine.launch_vm_process(self.session, 'headless').wait_for_completion(-1) + from tests.system.tools import read_from_socket + # Gotta figure out a more reliable way to check when the system is done booting. + # Maybe bootstrapped unit test images should have a startup script that issues + # a callback to the host. + from bootstrapvz.common.releases import wheezy + if self.release <= wheezy: + termination_string = 'INIT: Entering runlevel: 2' + else: + termination_string = 'Debian GNU/Linux' + self.console_output = read_from_socket(self.serial_port_path, termination_string, 120) - def shutdown(self): - log.debug('Shutting down vbox machine `{name}\''.format(name=self.name)) - self.session.console.power_down().wait_for_completion(-1) - if not waituntil(lambda: self.machine.session_state == virtualbox.library.SessionState.unlocked): - raise LockingException('Timeout while waiting for the machine to become unlocked') + def shutdown(self): + log.debug('Shutting down vbox machine `{name}\''.format(name=self.name)) + self.session.console.power_down().wait_for_completion(-1) + if not waituntil(lambda: self.machine.session_state == virtualbox.library.SessionState.unlocked): + raise LockingException('Timeout while waiting for the machine to become unlocked') - def destroy(self): - log.debug('Destroying vbox machine `{name}\''.format(name=self.name)) - if hasattr(self, 'machine'): - try: - log.debug('Detaching SATA storage controller from vbox machine `{name}\''.format(name=self.name)) - with lock(self.machine, self.session) as machine: - machine.detach_device(name='SATA Controller', controller_port=0, device=0) - 
machine.save_settings() - except virtualbox.library.VBoxErrorObjectNotFound: - pass - log.debug('Unregistering and removing vbox machine `{name}\''.format(name=self.name)) - self.machine.unregister(virtualbox.library.CleanupMode.unregister_only) - self.machine.remove(delete=True) - else: - log.debug('vbox machine `{name}\' was not created, skipping destruction'.format(name=self.name)) + def destroy(self): + log.debug('Destroying vbox machine `{name}\''.format(name=self.name)) + if hasattr(self, 'machine'): + try: + log.debug('Detaching SATA storage controller from vbox machine `{name}\''.format(name=self.name)) + with lock(self.machine, self.session) as machine: + machine.detach_device(name='SATA Controller', controller_port=0, device=0) + machine.save_settings() + except virtualbox.library.VBoxErrorObjectNotFound: + pass + log.debug('Unregistering and removing vbox machine `{name}\''.format(name=self.name)) + self.machine.unregister(virtualbox.library.CleanupMode.unregister_only) + self.machine.remove(delete=True) + else: + log.debug('vbox machine `{name}\' was not created, skipping destruction'.format(name=self.name)) @contextmanager def lock(machine, session): - if machine.session_state != virtualbox.library.SessionState.unlocked: - msg = ('Acquiring lock on machine failed, state was `{state}\' ' - 'instead of `Unlocked\'.'.format(state=str(machine.session_state))) - raise LockingException(msg) + if machine.session_state != virtualbox.library.SessionState.unlocked: + msg = ('Acquiring lock on machine failed, state was `{state}\' ' + 'instead of `Unlocked\'.'.format(state=str(machine.session_state))) + raise LockingException(msg) - machine.lock_machine(session, virtualbox.library.LockType.write) - yield session.machine + machine.lock_machine(session, virtualbox.library.LockType.write) + yield session.machine - if machine.session_state != virtualbox.library.SessionState.locked: - if not waituntil(lambda: machine.session_state == 
virtualbox.library.SessionState.unlocked): - msg = ('Error before trying to release lock on machine, state was `{state}\' ' - 'instead of `Locked\'.'.format(state=str(machine.session_state))) - raise LockingException(msg) + if machine.session_state != virtualbox.library.SessionState.locked: + if not waituntil(lambda: machine.session_state == virtualbox.library.SessionState.unlocked): + msg = ('Error before trying to release lock on machine, state was `{state}\' ' + 'instead of `Locked\'.'.format(state=str(machine.session_state))) + raise LockingException(msg) - session.unlock_machine() + session.unlock_machine() - if not waituntil(lambda: machine.session_state == virtualbox.library.SessionState.unlocked): - msg = ('Timeout while trying to release lock on machine, ' - 'last state was `{state}\''.format(state=str(machine.session_state))) - raise LockingException(msg) + if not waituntil(lambda: machine.session_state == virtualbox.library.SessionState.unlocked): + msg = ('Timeout while trying to release lock on machine, ' + 'last state was `{state}\''.format(state=str(machine.session_state))) + raise LockingException(msg) class LockingException(Exception): - pass + pass diff --git a/tests/system/tools/__init__.py b/tests/system/tools/__init__.py index 7f3b995..5af0a67 100644 --- a/tests/system/tools/__init__.py +++ b/tests/system/tools/__init__.py @@ -10,80 +10,80 @@ register_deserialization_handlers() @contextmanager def boot_manifest(manifest_data, boot_vars={}): - from bootstrapvz.common.tools import load_data - build_servers = load_data('build-servers.yml') - from bootstrapvz.remote.build_servers import pick_build_server - build_server = pick_build_server(build_servers, manifest_data) + from bootstrapvz.common.tools import load_data + build_servers = load_data('build-servers.yml') + from bootstrapvz.remote.build_servers import pick_build_server + build_server = pick_build_server(build_servers, manifest_data) - manifest_data = 
build_server.apply_build_settings(manifest_data) - from bootstrapvz.base.manifest import Manifest - manifest = Manifest(data=manifest_data) + manifest_data = build_server.apply_build_settings(manifest_data) + from bootstrapvz.base.manifest import Manifest + manifest = Manifest(data=manifest_data) - import importlib - provider_module = importlib.import_module('tests.system.providers.' + manifest.provider['name']) + import importlib + provider_module = importlib.import_module('tests.system.providers.' + manifest.provider['name']) - prepare_bootstrap = getattr(provider_module, 'prepare_bootstrap', noop) - with prepare_bootstrap(manifest, build_server): - bootstrap_info = None - log.info('Connecting to build server') - with build_server.connect() as connection: - log.info('Building manifest') - bootstrap_info = connection.run(manifest) + prepare_bootstrap = getattr(provider_module, 'prepare_bootstrap', noop) + with prepare_bootstrap(manifest, build_server): + bootstrap_info = None + log.info('Connecting to build server') + with build_server.connect() as connection: + log.info('Building manifest') + bootstrap_info = connection.run(manifest) - log.info('Creating and booting instance') - with provider_module.boot_image(manifest, build_server, bootstrap_info, **boot_vars) as instance: - yield instance + log.info('Creating and booting instance') + with provider_module.boot_image(manifest, build_server, bootstrap_info, **boot_vars) as instance: + yield instance def waituntil(predicate, timeout=5, interval=0.05): - import time - threshhold = time.time() + timeout - while time.time() < threshhold: - if predicate(): - return True - time.sleep(interval) - return False + import time + threshhold = time.time() + timeout + while time.time() < threshhold: + if predicate(): + return True + time.sleep(interval) + return False def read_from_socket(socket_path, termination_string, timeout, read_timeout=0.5): - import socket - import select - import errno - console = 
socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - console.connect(socket_path) - console.setblocking(0) + import socket + import select + import errno + console = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + console.connect(socket_path) + console.setblocking(0) - from timeit import default_timer - start = default_timer() + from timeit import default_timer + start = default_timer() - output = '' - ptr = 0 - continue_select = True - while continue_select: - read_ready, _, _ = select.select([console], [], [], read_timeout) - if console in read_ready: - while True: - try: - output += console.recv(1024) - if termination_string in output[ptr:]: - continue_select = False - else: - ptr = len(output) - len(termination_string) - break - except socket.error, e: - if e.errno != errno.EWOULDBLOCK: - raise Exception(e) - continue_select = False - if default_timer() - start > timeout: - from exceptions import SocketReadTimeout - msg = ('Reading from socket `{path}\' timed out after {seconds} seconds.\n' - 'Here is the output so far:\n{output}' - .format(path=socket_path, seconds=timeout, output=output)) - raise SocketReadTimeout(msg) - console.close() - return output + output = '' + ptr = 0 + continue_select = True + while continue_select: + read_ready, _, _ = select.select([console], [], [], read_timeout) + if console in read_ready: + while True: + try: + output += console.recv(1024) + if termination_string in output[ptr:]: + continue_select = False + else: + ptr = len(output) - len(termination_string) + break + except socket.error, e: + if e.errno != errno.EWOULDBLOCK: + raise Exception(e) + continue_select = False + if default_timer() - start > timeout: + from exceptions import SocketReadTimeout + msg = ('Reading from socket `{path}\' timed out after {seconds} seconds.\n' + 'Here is the output so far:\n{output}' + .format(path=socket_path, seconds=timeout, output=output)) + raise SocketReadTimeout(msg) + console.close() + return output @contextmanager def noop(*args, 
**kwargs): - yield + yield diff --git a/tests/system/tools/exceptions.py b/tests/system/tools/exceptions.py index 830c625..ad4b5ee 100644 --- a/tests/system/tools/exceptions.py +++ b/tests/system/tools/exceptions.py @@ -1,4 +1,4 @@ class SocketReadTimeout(Exception): - pass + pass diff --git a/tests/system/virtualbox_tests.py b/tests/system/virtualbox_tests.py index 86f3fac..62883d3 100644 --- a/tests/system/virtualbox_tests.py +++ b/tests/system/virtualbox_tests.py @@ -7,104 +7,104 @@ partials = {'vdi': '{provider: {name: virtualbox}, volume: {backing: vdi}}', def test_unpartitioned_extlinux_oldstable(): - std_partials = ['base', 'oldstable64', 'extlinux', 'unpartitioned', 'root_password'] - custom_partials = [partials['vmdk']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - with boot_manifest(manifest_data) as instance: - print(instance.console_output) + std_partials = ['base', 'oldstable64', 'extlinux', 'unpartitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) def test_msdos_extlinux_oldstable(): - std_partials = ['base', 'oldstable64', 'extlinux', 'msdos', 'partitioned', 'root_password'] - custom_partials = [partials['vmdk']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - with boot_manifest(manifest_data) as instance: - print(instance.console_output) + std_partials = ['base', 'oldstable64', 'extlinux', 'msdos', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) def test_gpt_extlinux_oldstable(): - std_partials = ['base', 'oldstable64', 'extlinux', 'gpt', 'partitioned', 'root_password'] - custom_partials = [partials['vmdk']] - manifest_data = merge_manifest_data(std_partials, 
custom_partials) - with boot_manifest(manifest_data) as instance: - print(instance.console_output) + std_partials = ['base', 'oldstable64', 'extlinux', 'gpt', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) def test_unpartitioned_extlinux_stable(): - std_partials = ['base', 'stable64', 'extlinux', 'unpartitioned', 'root_password'] - custom_partials = [partials['vmdk']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - with boot_manifest(manifest_data) as instance: - print(instance.console_output) + std_partials = ['base', 'stable64', 'extlinux', 'unpartitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) def test_msdos_extlinux_stable(): - std_partials = ['base', 'stable64', 'extlinux', 'msdos', 'partitioned', 'root_password'] - custom_partials = [partials['vmdk']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - with boot_manifest(manifest_data) as instance: - print(instance.console_output) + std_partials = ['base', 'stable64', 'extlinux', 'msdos', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) def test_gpt_extlinux_stable(): - std_partials = ['base', 'stable64', 'extlinux', 'gpt', 'partitioned', 'root_password'] - custom_partials = [partials['vmdk']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - with boot_manifest(manifest_data) as instance: - print(instance.console_output) + std_partials = ['base', 'stable64', 'extlinux', 'gpt', 'partitioned', 'root_password'] + custom_partials = 
[partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) def test_msdos_grub_stable(): - std_partials = ['base', 'stable64', 'grub', 'msdos', 'partitioned', 'root_password'] - custom_partials = [partials['vmdk']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - with boot_manifest(manifest_data) as instance: - print(instance.console_output) + std_partials = ['base', 'stable64', 'grub', 'msdos', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) def test_gpt_grub_stable(): - std_partials = ['base', 'stable64', 'grub', 'gpt', 'partitioned', 'root_password'] - custom_partials = [partials['vmdk']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - with boot_manifest(manifest_data) as instance: - print(instance.console_output) + std_partials = ['base', 'stable64', 'grub', 'gpt', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) def test_unpartitioned_extlinux_unstable(): - std_partials = ['base', 'unstable64', 'extlinux', 'unpartitioned', 'root_password'] - custom_partials = [partials['vmdk']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - with boot_manifest(manifest_data) as instance: - print(instance.console_output) + std_partials = ['base', 'unstable64', 'extlinux', 'unpartitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) def test_msdos_extlinux_unstable(): - std_partials = ['base', 
'unstable64', 'extlinux', 'msdos', 'partitioned', 'root_password'] - custom_partials = [partials['vmdk']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - with boot_manifest(manifest_data) as instance: - print(instance.console_output) + std_partials = ['base', 'unstable64', 'extlinux', 'msdos', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) def test_gpt_extlinux_unstable(): - std_partials = ['base', 'unstable64', 'extlinux', 'gpt', 'partitioned', 'root_password'] - custom_partials = [partials['vmdk']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - with boot_manifest(manifest_data) as instance: - print(instance.console_output) + std_partials = ['base', 'unstable64', 'extlinux', 'gpt', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) def test_msdos_grub_unstable(): - std_partials = ['base', 'unstable64', 'grub', 'msdos', 'partitioned', 'root_password'] - custom_partials = [partials['vmdk']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - with boot_manifest(manifest_data) as instance: - print(instance.console_output) + std_partials = ['base', 'unstable64', 'grub', 'msdos', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) def test_gpt_grub_unstable(): - std_partials = ['base', 'unstable64', 'grub', 'gpt', 'partitioned', 'root_password'] - custom_partials = [partials['vmdk']] - manifest_data = merge_manifest_data(std_partials, custom_partials) - with boot_manifest(manifest_data) as instance: 
- print(instance.console_output) + std_partials = ['base', 'unstable64', 'grub', 'gpt', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) diff --git a/tests/unit/bytes_tests.py b/tests/unit/bytes_tests.py index 7c95797..252aa00 100644 --- a/tests/unit/bytes_tests.py +++ b/tests/unit/bytes_tests.py @@ -5,104 +5,104 @@ from bootstrapvz.common.exceptions import UnitError def test_lt(): - assert Bytes('1MiB') < Bytes('2MiB') + assert Bytes('1MiB') < Bytes('2MiB') def test_le(): - assert Bytes('1MiB') <= Bytes('2MiB') - assert Bytes('1MiB') <= Bytes('1MiB') + assert Bytes('1MiB') <= Bytes('2MiB') + assert Bytes('1MiB') <= Bytes('1MiB') def test_eq(): - eq_(Bytes('1MiB'), Bytes('1MiB')) + eq_(Bytes('1MiB'), Bytes('1MiB')) def test_neq(): - assert Bytes('15MiB') != Bytes('1MiB') + assert Bytes('15MiB') != Bytes('1MiB') def test_gt(): - assert Bytes('2MiB') > Bytes('1MiB') + assert Bytes('2MiB') > Bytes('1MiB') def test_ge(): - assert Bytes('2MiB') >= Bytes('1MiB') - assert Bytes('2MiB') >= Bytes('2MiB') + assert Bytes('2MiB') >= Bytes('1MiB') + assert Bytes('2MiB') >= Bytes('2MiB') def test_eq_unit(): - eq_(Bytes('1024MiB'), Bytes('1GiB')) + eq_(Bytes('1024MiB'), Bytes('1GiB')) def test_add(): - eq_(Bytes('2GiB'), Bytes('1GiB') + Bytes('1GiB')) + eq_(Bytes('2GiB'), Bytes('1GiB') + Bytes('1GiB')) def test_iadd(): - b = Bytes('1GiB') - b += Bytes('1GiB') - eq_(Bytes('2GiB'), b) + b = Bytes('1GiB') + b += Bytes('1GiB') + eq_(Bytes('2GiB'), b) def test_sub(): - eq_(Bytes('1GiB'), Bytes('2GiB') - Bytes('1GiB')) + eq_(Bytes('1GiB'), Bytes('2GiB') - Bytes('1GiB')) def test_isub(): - b = Bytes('2GiB') - b -= Bytes('1GiB') - eq_(Bytes('1GiB'), b) + b = Bytes('2GiB') + b -= Bytes('1GiB') + eq_(Bytes('1GiB'), b) def test_mul(): - eq_(Bytes('2GiB'), Bytes('1GiB') * 2) + eq_(Bytes('2GiB'), Bytes('1GiB') * 2) 
@raises(UnitError) def test_mul_bytes(): - Bytes('1GiB') * Bytes('1GiB') + Bytes('1GiB') * Bytes('1GiB') def test_imul(): - b = Bytes('1GiB') - b *= 2 - eq_(Bytes('2GiB'), b) + b = Bytes('1GiB') + b *= 2 + eq_(Bytes('2GiB'), b) def test_div(): - eq_(Bytes('1GiB'), Bytes('2GiB') / 2) + eq_(Bytes('1GiB'), Bytes('2GiB') / 2) def test_div_bytes(): - eq_(2, Bytes('2GiB') / Bytes('1GiB')) + eq_(2, Bytes('2GiB') / Bytes('1GiB')) def test_idiv(): - b = Bytes('2GiB') - b /= 2 - eq_(Bytes('1GiB'), b) + b = Bytes('2GiB') + b /= 2 + eq_(Bytes('1GiB'), b) def test_mod(): - eq_(Bytes('256MiB'), Bytes('1GiB') % Bytes('768MiB')) + eq_(Bytes('256MiB'), Bytes('1GiB') % Bytes('768MiB')) @raises(UnitError) def test_mod_int(): - Bytes('1GiB') % 768 + Bytes('1GiB') % 768 def test_imod(): - b = Bytes('1GiB') - b %= Bytes('768MiB') - eq_(Bytes('256MiB'), b) + b = Bytes('1GiB') + b %= Bytes('768MiB') + eq_(Bytes('256MiB'), b) @raises(UnitError) def test_imod_int(): - b = Bytes('1GiB') - b %= 5 + b = Bytes('1GiB') + b %= 5 def test_convert_int(): - eq_(pow(1024, 3), int(Bytes('1GiB'))) + eq_(pow(1024, 3), int(Bytes('1GiB'))) diff --git a/tests/unit/manifests_tests.py b/tests/unit/manifests_tests.py index 0894c6f..a7f5f3e 100644 --- a/tests/unit/manifests_tests.py +++ b/tests/unit/manifests_tests.py @@ -1,31 +1,31 @@ def test_manifest_generator(): - """ - manifests_tests - test_manifest_generator. + """ + manifests_tests - test_manifest_generator. - Loops through the manifests directory and tests that - each file can successfully be loaded and validated. - """ + Loops through the manifests directory and tests that + each file can successfully be loaded and validated. 
+ """ - from nose.tools import assert_true - from bootstrapvz.base.manifest import Manifest + from nose.tools import assert_true + from bootstrapvz.base.manifest import Manifest - def validate_manifest(path): - manifest = Manifest(path=path) - assert_true(manifest.data) - assert_true(manifest.data['name']) - assert_true(manifest.data['provider']) - assert_true(manifest.data['bootstrapper']) - assert_true(manifest.data['volume']) - assert_true(manifest.data['system']) + def validate_manifest(path): + manifest = Manifest(path=path) + assert_true(manifest.data) + assert_true(manifest.data['name']) + assert_true(manifest.data['provider']) + assert_true(manifest.data['bootstrapper']) + assert_true(manifest.data['volume']) + assert_true(manifest.data['system']) - import os.path - from .. import recursive_glob - from itertools import chain - manifests = os.path.join(os.path.dirname(os.path.realpath(__file__)), - '../../manifests') - manifest_paths = chain(recursive_glob(manifests, '*.yml'), recursive_glob(manifests, '*.json')) - for manifest_path in manifest_paths: - validate_manifest.description = "Validating %s" % os.path.relpath(manifest_path, manifests) - yield validate_manifest, manifest_path + import os.path + from .. 
import recursive_glob + from itertools import chain + manifests = os.path.join(os.path.dirname(os.path.realpath(__file__)), + '../../manifests') + manifest_paths = chain(recursive_glob(manifests, '*.yml'), recursive_glob(manifests, '*.json')) + for manifest_path in manifest_paths: + validate_manifest.description = "Validating %s" % os.path.relpath(manifest_path, manifests) + yield validate_manifest, manifest_path diff --git a/tests/unit/releases_tests.py b/tests/unit/releases_tests.py index df7d588..324fd5e 100644 --- a/tests/unit/releases_tests.py +++ b/tests/unit/releases_tests.py @@ -3,37 +3,37 @@ from bootstrapvz.common import releases def test_gt(): - assert releases.wheezy > releases.squeeze + assert releases.wheezy > releases.squeeze def test_lt(): - assert releases.wheezy < releases.stretch + assert releases.wheezy < releases.stretch def test_eq(): - assert releases.wheezy == releases.wheezy + assert releases.wheezy == releases.wheezy def test_neq(): - assert releases.wheezy != releases.jessie + assert releases.wheezy != releases.jessie def test_identity(): - assert releases.wheezy is releases.wheezy + assert releases.wheezy is releases.wheezy def test_not_identity(): - assert releases.wheezy is not releases.stable - assert releases.stable is not releases.jessie + assert releases.wheezy is not releases.stable + assert releases.stable is not releases.jessie def test_alias(): - assert releases.oldstable == releases.wheezy - assert releases.stable == releases.jessie - assert releases.testing == releases.stretch - assert releases.unstable == releases.sid + assert releases.oldstable == releases.wheezy + assert releases.stable == releases.jessie + assert releases.testing == releases.stretch + assert releases.unstable == releases.sid @raises(releases.UnknownReleaseException) def test_bogus_releasename(): - releases.get_release('nemo') + releases.get_release('nemo') diff --git a/tests/unit/sectors_tests.py b/tests/unit/sectors_tests.py index 25ebdc0..c156936 100644 
--- a/tests/unit/sectors_tests.py +++ b/tests/unit/sectors_tests.py @@ -8,120 +8,120 @@ std_secsz = Bytes(512) def test_init_with_int(): - secsize = 4096 - eq_(Sectors('1MiB', secsize), Sectors(256, secsize)) + secsize = 4096 + eq_(Sectors('1MiB', secsize), Sectors(256, secsize)) def test_lt(): - assert Sectors('1MiB', std_secsz) < Sectors('2MiB', std_secsz) + assert Sectors('1MiB', std_secsz) < Sectors('2MiB', std_secsz) def test_le(): - assert Sectors('1MiB', std_secsz) <= Sectors('2MiB', std_secsz) - assert Sectors('1MiB', std_secsz) <= Sectors('1MiB', std_secsz) + assert Sectors('1MiB', std_secsz) <= Sectors('2MiB', std_secsz) + assert Sectors('1MiB', std_secsz) <= Sectors('1MiB', std_secsz) def test_eq(): - eq_(Sectors('1MiB', std_secsz), Sectors('1MiB', std_secsz)) + eq_(Sectors('1MiB', std_secsz), Sectors('1MiB', std_secsz)) def test_neq(): - assert Sectors('15MiB', std_secsz) != Sectors('1MiB', std_secsz) + assert Sectors('15MiB', std_secsz) != Sectors('1MiB', std_secsz) def test_gt(): - assert Sectors('2MiB', std_secsz) > Sectors('1MiB', std_secsz) + assert Sectors('2MiB', std_secsz) > Sectors('1MiB', std_secsz) def test_ge(): - assert Sectors('2MiB', std_secsz) >= Sectors('1MiB', std_secsz) - assert Sectors('2MiB', std_secsz) >= Sectors('2MiB', std_secsz) + assert Sectors('2MiB', std_secsz) >= Sectors('1MiB', std_secsz) + assert Sectors('2MiB', std_secsz) >= Sectors('2MiB', std_secsz) def test_eq_unit(): - eq_(Sectors('1024MiB', std_secsz), Sectors('1GiB', std_secsz)) + eq_(Sectors('1024MiB', std_secsz), Sectors('1GiB', std_secsz)) def test_add(): - eq_(Sectors('2GiB', std_secsz), Sectors('1GiB', std_secsz) + Sectors('1GiB', std_secsz)) + eq_(Sectors('2GiB', std_secsz), Sectors('1GiB', std_secsz) + Sectors('1GiB', std_secsz)) @raises(UnitError) def test_add_with_diff_secsize(): - Sectors('1GiB', Bytes(512)) + Sectors('1GiB', Bytes(4096)) + Sectors('1GiB', Bytes(512)) + Sectors('1GiB', Bytes(4096)) def test_iadd(): - s = Sectors('1GiB', std_secsz) - s += 
Sectors('1GiB', std_secsz) - eq_(Sectors('2GiB', std_secsz), s) + s = Sectors('1GiB', std_secsz) + s += Sectors('1GiB', std_secsz) + eq_(Sectors('2GiB', std_secsz), s) def test_sub(): - eq_(Sectors('1GiB', std_secsz), Sectors('2GiB', std_secsz) - Sectors('1GiB', std_secsz)) + eq_(Sectors('1GiB', std_secsz), Sectors('2GiB', std_secsz) - Sectors('1GiB', std_secsz)) def test_sub_int(): - secsize = Bytes('4KiB') - eq_(Sectors('1MiB', secsize), Sectors('1028KiB', secsize) - 1) + secsize = Bytes('4KiB') + eq_(Sectors('1MiB', secsize), Sectors('1028KiB', secsize) - 1) def test_isub(): - s = Sectors('2GiB', std_secsz) - s -= Sectors('1GiB', std_secsz) - eq_(Sectors('1GiB', std_secsz), s) + s = Sectors('2GiB', std_secsz) + s -= Sectors('1GiB', std_secsz) + eq_(Sectors('1GiB', std_secsz), s) def test_mul(): - eq_(Sectors('2GiB', std_secsz), Sectors('1GiB', std_secsz) * 2) + eq_(Sectors('2GiB', std_secsz), Sectors('1GiB', std_secsz) * 2) @raises(UnitError) def test_mul_bytes(): - Sectors('1GiB', std_secsz) * Sectors('1GiB', std_secsz) + Sectors('1GiB', std_secsz) * Sectors('1GiB', std_secsz) def test_imul(): - s = Sectors('1GiB', std_secsz) - s *= 2 - eq_(Sectors('2GiB', std_secsz), s) + s = Sectors('1GiB', std_secsz) + s *= 2 + eq_(Sectors('2GiB', std_secsz), s) def test_div(): - eq_(Sectors('1GiB', std_secsz), Sectors('2GiB', std_secsz) / 2) + eq_(Sectors('1GiB', std_secsz), Sectors('2GiB', std_secsz) / 2) def test_div_bytes(): - eq_(2, Sectors('2GiB', std_secsz) / Sectors('1GiB', std_secsz)) + eq_(2, Sectors('2GiB', std_secsz) / Sectors('1GiB', std_secsz)) def test_idiv(): - s = Sectors('2GiB', std_secsz) - s /= 2 - eq_(Sectors('1GiB', std_secsz), s) + s = Sectors('2GiB', std_secsz) + s /= 2 + eq_(Sectors('1GiB', std_secsz), s) def test_mod(): - eq_(Sectors('256MiB', std_secsz), Sectors('1GiB', std_secsz) % Sectors('768MiB', std_secsz)) + eq_(Sectors('256MiB', std_secsz), Sectors('1GiB', std_secsz) % Sectors('768MiB', std_secsz)) @raises(UnitError) def test_mod_int(): - 
Sectors('1GiB', std_secsz) % 768 + Sectors('1GiB', std_secsz) % 768 def test_imod(): - s = Sectors('1GiB', std_secsz) - s %= Sectors('768MiB', std_secsz) - eq_(Sectors('256MiB', std_secsz), s) + s = Sectors('1GiB', std_secsz) + s %= Sectors('768MiB', std_secsz) + eq_(Sectors('256MiB', std_secsz), s) @raises(UnitError) def test_imod_int(): - s = Sectors('1GiB', std_secsz) - s %= 5 + s = Sectors('1GiB', std_secsz) + s %= 5 def test_convert_int(): - secsize = 512 - eq_(pow(1024, 3) / secsize, int(Sectors('1GiB', secsize))) + secsize = 512 + eq_(pow(1024, 3) / secsize, int(Sectors('1GiB', secsize))) diff --git a/tests/unit/tools_tests.py b/tests/unit/tools_tests.py index 7c6feca..fc121d0 100644 --- a/tests/unit/tools_tests.py +++ b/tests/unit/tools_tests.py @@ -6,35 +6,35 @@ subprocess_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'sub def setup_logger(): - import logging - root = logging.getLogger() - root.setLevel(logging.NOTSET) + import logging + root = logging.getLogger() + root.setLevel(logging.NOTSET) - import StringIO - output = StringIO.StringIO() - string_handler = logging.StreamHandler(output) - string_handler.setLevel(logging.DEBUG) - root.addHandler(string_handler) - return output + import StringIO + output = StringIO.StringIO() + string_handler = logging.StreamHandler(output) + string_handler.setLevel(logging.DEBUG) + root.addHandler(string_handler) + return output def test_log_call_output_order(): - logged = setup_logger() - fixture = """ + logged = setup_logger() + fixture = """ 2 0.0 one\\\\n 1 0.2 two\\\\n 1 0.2 four\\\\n 2 0.2 No, three..\\\\n 1 0.2 three\\\\n """ - status, stdout, stderr = log_call([subprocess_path], stdin=fixture) - eq_(status, 0) - eq_(stderr, ['one', 'No, three..']) - eq_(stdout, ['two', 'four', 'three']) - expected_order = ['one', - 'two', - 'four', - 'No, three..', - 'three', - ] - eq_(expected_order, logged.getvalue().split("\n")[8:-1]) + status, stdout, stderr = log_call([subprocess_path], stdin=fixture) + 
eq_(status, 0) + eq_(stderr, ['one', 'No, three..']) + eq_(stdout, ['two', 'four', 'three']) + expected_order = ['one', + 'two', + 'four', + 'No, three..', + 'three', + ] + eq_(expected_order, logged.getvalue().split("\n")[8:-1]) diff --git a/tox.ini b/tox.ini index beecbb8..ca3e9bd 100644 --- a/tox.ini +++ b/tox.ini @@ -2,7 +2,7 @@ envlist = flake8, unit, integration, docs [flake8] -ignore = E101,E221,E241,E501,W191 +ignore = E221,E241,E501 max-line-length = 110 [testenv:flake8]