diff --git a/.gitignore b/.gitignore index ac36a3d..dfcda55 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,5 @@ # Testing /.coverage /.tox/ +/build-servers.yml +/integration.html diff --git a/CHANGELOG b/CHANGELOG deleted file mode 100644 index f030ecc..0000000 --- a/CHANGELOG +++ /dev/null @@ -1,12 +0,0 @@ -2014-05-04: - Dhananjay Balan: - * Salt minion installation & configuration plugin - * Expose debootstrap --include-packages and --exclude-packages options to manifest -2014-05-03: - Anders Ingemann: - * Require hostname setting for vagrant plugin - * Fixes #14: S3 images can now be bootstrapped outside EC2. - * Added enable_agent option to puppet plugin -2014-05-02: - Tomasz Rybak: - * Added Google Compute Engine Provider diff --git a/CHANGELOG.rst b/CHANGELOG.rst new file mode 100644 index 0000000..0d17e6e --- /dev/null +++ b/CHANGELOG.rst @@ -0,0 +1,146 @@ +Changelog +========= + +2015-05-02 +---------- +Anders Ingemann: + * Fix #32: Add image_commands example + * Fix #99: rename image_commands to commands + * Fix #139: Vagrant / Virtualbox provider should set ostype when 32 bits selected + * Fix #204: Create a new phase where user modification tasks can run + +2015-04-29 +---------- +Anders Ingemann: + * Fix #104: Don't verify default target when adding packages + * Fix #217: Implement get_version() function in common.tools + +2015-04-28 +---------- +Jonh Wendell: + * root_password: Enable SSH root login + +2015-04-27 +---------- +John Kristensen: + * Add authentication support to the apt proxy plugin + +2015-04-25 +---------- +Anders Ingemann (work started 2014-08-31, merged on 2015-04-25): + * Introduce `remote bootstrapping `__ + * Introduce `integration testing `__ (for VirtualBox and EC2) + * Merge the end-user documentation into the sphinx docs + (plugin & provider docs are now located in their respective folders as READMEs) + * Include READMEs in sphinx docs and transform their links + * Docs for integration testing + * Document the remote bootstrapping procedure + * Add documentation about the documentation + * Add list of supported builds to the docs + * Add html output to integration tests + * Implement PR #201 by @jszwedko (bump required euca2ools version) + * grub now works on jessie + * extlinux is now running on jessie + * Issue warning when specifying pre/successors across phases (but still error out if it's a conflict) + * Add salt dependencies in the right phase + * extlinux now works with GPT on HVM instances + * Take @ssgelm's advice in #155 and copy the mount table -- df warnings no more + * Generally deny installing grub on squeeze (too much of a hassle to get working, PRs welcome) + * Add 1 sector gap between partitions on GPT + * Add new task: DeterminKernelVersion, this can potentially fix a lot of small problems + * Disable getty processes on jessie through logind config + * Partition volumes by sectors instead of bytes + This allows for finer grained control over the partition sizes and gaps + Add new Sectors unit, enhance Bytes unit, add unit tests for both + * Don't require qemu for raw volumes, use `truncate` instead + * Fix #179: Disabling getty processes task fails half the time + * Split grub and extlinux installs into separate modules + * Fix extlinux config for squeeze + * Fix #136: Make extlinux output boot messages to the serial console + * Extend sed_i to raise Exceptions when the expected amount of replacements is not met + +Jonas Bergler: + * Fixes #145: Fix installation of vbox guest additions. 
+ +Tiago Ilieve: + * Fixes #142: msdos partition type incorrect for swap partition (Linux) + +2015-04-23 +---------- +Tiago Ilieve: + * Fixes #212: Sparse file is created on the current directory + +2014-11-23 +---------- +Noah Fontes: + * Add support for enhanced networking on EC2 images + +2014-07-12 +---------- +Tiago Ilieve: + * Fixes #96: AddBackports is now a common task + +2014-07-09 +---------- +Anders Ingemann: + * Allow passing data into the manifest + * Refactor logging setup to be more modular + * Convert every JSON file to YAML + * Convert "provider" into provider specific section + +2014-07-02 +---------- +Vladimir Vitkov: + * Improve grub options to work better with virtual machines + +2014-06-30 +---------- +Tomasz Rybak: + * Return information about created image + +2014-06-22 +---------- +Victor Marmol: + * Enable the memory cgroup for the Docker plugin + +2014-06-19 +---------- +Tiago Ilieve: + * Fixes #94: allow stable/oldstable as release name on manifest + +Vladimir Vitkov: + * Improve ami listing performance + +2014-06-07 +---------- +Tiago Ilieve: + * Download `gsutil` tarball to workspace instead of working directory + * Fixes #97: remove raw disk image created by GCE after build + +2014-06-06 +---------- +Ilya Margolin: + * pip_install plugin + +2014-05-23 +---------- +Tiago Ilieve: + * Fixes #95: check if the specified APT proxy server can be reached + +2014-05-04 +---------- +Dhananjay Balan: + * Salt minion installation & configuration plugin + * Expose debootstrap --include-packages and --exclude-packages options to manifest + +2014-05-03 +---------- +Anders Ingemann: + * Require hostname setting for vagrant plugin + * Fixes #14: S3 images can now be bootstrapped outside EC2. + * Added enable_agent option to puppet plugin + +2014-05-02 +---------- +Tomasz Rybak: + * Added Google Compute Engine Provider diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 0cbba9d..0000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,42 +0,0 @@ -Contributing -============ - -Do you want to contribute to the bootstrap-vz project? Nice! Here is the basic workflow: - -* Read the [development guidelines](http://bootstrap-vz.readthedocs.org/en/master/guidelines.html) -* Fork this repository. -* Make any changes you want/need. -* Check the coding style of your changes using [tox](http://tox.readthedocs.org/) by running `tox -e flake8` - and fix any warnings that may appear. - This check will be repeated by [Travis CI](https://travis-ci.org/andsens/bootstrap-vz) - once you send a pull request, so it's better if you check this beforehand. -* If the change is significant (e.g. a new plugin, manifest setting or security fix) - add your name and contribution to the [CHANGELOG](CHANGELOG). -* Commit your changes. -* Squash the commits if needed. For instance, it is fine if you have multiple commits describing atomic units - of work, but there's no reason to have many little commits just because of corrected typos. -* Push to your fork, preferably on a topic branch. - -From here on there are two paths to consider: - -If your patch is a new feature, e.g.: plugin, provider, etc. then: - -* Send a pull request to the `development` branch. It will be merged into the `master` branch when we can make - sure that the code is stable. - -If it is a bug/security fix: - -* Send a pull request to the `master` branch. 
- --- - -Please try to be very descriptive about your changes when you write a pull request, stating what it does, why -it is needed, which use cases this change covers etc. -You may be asked to rebase your work on the current branch state, so it can be merged cleanly. -If you push a new commit to your pull request you will have to add a new comment to the PR, -provided that you want us notified. Github will otherwise not send a notification. - -Be aware that your modifications need to be properly documented and pushed to the `gh-pages` branch, if they -concern anything done on `master`. Otherwise, they should be sent to the `gh-pages-dev`. - -Happy hacking! :-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 0000000..e615fb8 --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,165 @@ +Contributing +============ + + +Sending pull requests +--------------------- +Do you want to contribute to the bootstrap-vz project? Nice! Here is the basic workflow: + +* Read the `development guidelines <#development-guidelines>`__ +* Fork this repository. +* Make any changes you want/need. +* Check the coding style of your changes using `tox `__ by running `tox -e flake8` + and fix any warnings that may appear. + This check will be repeated by `Travis CI `__ + once you send a pull request, so it's better if you check this beforehand. +* If the change is significant (e.g. a new plugin, manifest setting or security fix) + add your name and contribution to the `changelog `__. +* Commit your changes. +* Squash the commits if needed. For instance, it is fine if you have multiple commits describing atomic units + of work, but there's no reason to have many little commits just because of corrected typos. +* Push to your fork, preferably on a topic branch. +* Send a pull request to the `master` branch. + +Please try to be very descriptive about your changes when you write a pull request, stating what it does, why +it is needed, which use cases this change covers, etc. +You may be asked to rebase your work on the current branch state, so it can be merged cleanly. +If you push a new commit to your pull request you will have to add a new comment to the PR, +provided that you want us notified. Github will otherwise not send a notification. + +Be aware that your modifications need to be properly documented. Please take a look at the +`documentation section <#documentation>`__ to see how to do that. + +Happy hacking! :-) + + +Development guidelines +---------------------- + +The following guidelines should serve as general advice when +developing providers or plugins for bootstrap-vz. Keep in mind that +these guidelines are not rules, they are advice on how to better add +value to the bootstrap-vz codebase. + + +The manifest should always fully describe the resulting image +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The outcome of a bootstrapping process should never depend on settings +specified elsewhere. + +This allows others to easily reproduce any setup other people are running +and makes it possible to share manifests. +`The official debian EC2 images`__ for example can be reproduced +using the manifests available in the manifest directory of bootstrap-vz. + +__ https://aws.amazon.com/marketplace/seller-profile?id=890be55d-32d8-4bc8-9042-2b4fd83064d5 + +The bootstrapper should always be able to run fully unattended +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +For end users, this guideline minimizes the risk of errors.
Any +required input would also be in direct conflict with the previous +guideline that the manifest should always fully describe the resulting +image. + +Additionally, developers may have to run the bootstrap +process multiple times; any prompts in the middle of that +process may significantly slow down development. + + +The bootstrapper should only need as much setup as the manifest requires +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Having to shuffle specific paths on the host into place +(e.g. ``/target`` has to be created manually) to get the bootstrapper +running is going to increase the rate of errors made by users. +Aim for minimal setup. + +Exceptions are of course things such as the path to +the VirtualBox Guest Additions ISO or tools like ``parted`` that +need to be installed on the host. + + +Roll complexity into which tasks are added to the tasklist +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +If a ``run()`` function checks whether it should do any work or simply be +skipped, consider doing that check in ``resolve_tasks()`` instead and +avoid adding that task altogether. This allows people looking at the +tasklist in the logfile to determine what work has been performed. + +If a task says it will modify a file but then bails, a developer may get +confused when looking at that file after bootstrapping. He could +conclude that the file has either been overwritten or that the +search & replace does not work correctly. + + +Control flow should be directed from the task graph +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Avoid creating complicated ``run()`` functions. If necessary, split up +a function into two semantically separate tasks. + +This allows other tasks to interleave with the control-flow and add extended +functionality (e.g. because volume creation and mounting are two +separate tasks, `the prebootstrapped plugin +`__ +can replace the volume creation task with a task of its own that +creates a volume from a snapshot instead, but still reuse the mount task). + + +Task classes should be treated as decorated run() functions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Tasks should not have any state; that's what the +BootstrapInformation object is for. + +Only add stuff to the BootstrapInformation object when really necessary +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This is mainly to avoid clutter. + + +Use a json-schema to check for allowed settings +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The json-schema may be verbose but it keeps the bulk of the check work outside the +python code, which is a big plus when it comes to readability. +This only applies as long as the checks are simple. +You can of course fall back to doing the check in python when that solution is +considerably less complex. + + +When invoking external programs, use long options whenever possible +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This makes the commands a lot easier to understand, since +the option names usually hint at what they do. + + +When invoking external programs, don't use full paths, rely on ``$PATH`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This increases robustness when executable locations change. +Example: Use ``log_call(['wget', ...])`` instead of ``log_call(['/usr/bin/wget', ...])``. + + +Coding style +------------ +bootstrap-vz is coded to comply closely with the PEP8 style +guidelines.
There are, however, a few exceptions: + +* Max line length is 110 chars, not 80. +* Multiple assignments may be aligned with spaces so that the = signs align + vertically. +* Ignore ``E101``: Indent with tabs and align with spaces +* Ignore ``E221 & E241``: Alignment of assignments +* Ignore ``E501``: The max line length is not 80 characters +* Ignore ``W191``: Indent with tabs not spaces + +The codebase can be checked for any violations quite easily, since those rules are already specified in the +`tox `__ configuration file. +:: + + tox -e flake8 + + +Documentation +------------- +When developing a provider or plugin, make sure to update/create the README.rst +located in the provider/plugin folder. +Any links to other rst files should be relative and work when viewed on github. +For information on `how to build the documentation `_ and how +the various parts fit together, +refer to `the documentation about the documentation `__ :-) diff --git a/MANIFEST.in b/MANIFEST.in index a671734..e165705 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,3 +2,4 @@ include LICENSE include manifests/* recursive-include bootstrapvz assets/* recursive-include bootstrapvz *.json +recursive-include bootstrapvz *.yml diff --git a/README.md b/README.md deleted file mode 100644 index 8eb57fe..0000000 --- a/README.md +++ /dev/null @@ -1,45 +0,0 @@ -bootstrap-vz -=========================================== -bootstrap-vz is a bootstrapping framework for Debian. -It is is specifically targeted at bootstrapping systems for virtualized environments. -bootstrap-vz runs without any user intervention and generates ready-to-boot images for -[a number of virtualization platforms](http://andsens.github.io/bootstrap-vz/providers.html). -Its aim is to provide a reproducable bootstrapping process using [manifests](http://andsens.github.io/bootstrap-vz/manifest.html) as well as supporting a high degree of customizability through plugins. - -bootstrap-vz was coded from scratch in python once the bash script architecture that was used in the -[build-debian-cloud](https://github.com/andsens/build-debian-cloud) bootstrapper reached its -limits. - -Documentation ------------- -The end-user documentation for bootstrap-vz is available -at [andsens.github.io/bootstrap-vz](http://andsens.github.io/bootstrap-vz). -There, you can discover [what the dependencies](http://andsens.github.io/bootstrap-vz/#dependencies) -for a specific cloud provider are, [see a list of available plugins](http://andsens.github.io/bootstrap-vz/plugins.html) -and learn [how you create a manifest](http://andsens.github.io/bootstrap-vz/manifest.html). - -Installation ------------- - -bootstrap-vz has a master branch for stable releases and a development for, well, development. -After checking out the branch of your choice you can install the python dependencies by running -`python setup.py install`. However, depending on what kind of image you'd like to bootstrap, -there are other debian package dependencies as well, at the very least you will need `debootstrap`. -[The documentation](http://andsens.github.io/bootstrap-vz/) explains this in more detail. - -Note that bootstrap-vz will tell you which tools it requires when they aren't -present (the different packages are mentioned in the error message), so you can -simply run bootstrap-vz once to get a list of the packages, install them, -and then re-run.
- -Developers ---------- -The API documentation, development guidelines and an explanation of bootstrap-vz internals -can be found at [bootstrap-vz.readthedocs.org](http://bootstrap-vz.readthedocs.org). - -Contributing ------------ - -Contribution guidelines are described on the [CONTRIBUTING](CONTRIBUTING.md) file. There's also a -[topic on the documentation](http://bootstrap-vz.readthedocs.org/en/development/guidelines.html#coding-style) -regarding the coding style. diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..81b0768 --- /dev/null +++ b/README.rst @@ -0,0 +1,153 @@ +bootstrap-vz +============ + +bootstrap-vz is a bootstrapping framework for Debian that creates ready-to-boot +images able to run on a number of cloud providers and virtual machines. +bootstrap-vz runs without any user intervention and +generates ready-to-boot images for a number of virtualization +platforms. +Its aim is to provide a reproducible bootstrapping process using +`manifests `__ +as well as supporting a high degree of customizability through plugins. + +bootstrap-vz was coded from scratch in python once the bash script +architecture that was used in the +`build-debian-cloud `__ +bootstrapper reached its limits. + +Documentation +------------- + +The documentation for bootstrap-vz is available at +`bootstrap-vz.readthedocs.org `__. +There, you can discover `what the dependencies <#dependencies>`__ for +a specific cloud provider are, `see a list of available plugins `__ +and learn `how you create a manifest `__. + +Note to developers: `The documentation `__ is generated in +a rather peculiar and nifty way. + +Installation +------------ + +bootstrap-vz has a master branch for stable releases and a development branch +for, well, development. + +After checking out the branch of your choice you can install the +python dependencies by running ``python setup.py install``. However, +depending on what kind of image you'd like to bootstrap, there are +other debian package dependencies as well; at the very least you will +need ``debootstrap``. +`The documentation `__ +explains this in more detail. + +Note that bootstrap-vz will tell you which tools it requires when they +aren't present (the different packages are mentioned in the error +message), so you can simply run bootstrap-vz once to get a list of the +packages, install them, and then re-run. + +Quick start +----------- + +Here are a few quickstart tutorials for the most common images. +If you plan on partitioning your volume, you will need the ``parted`` +package and ``kpartx``: + +.. code:: sh + + root@host:~# apt-get install parted kpartx + +Note that you can always abort a bootstrapping process by pressing +``Ctrl+C``; bootstrap-vz will then initiate a cleanup/rollback process, +where volumes are detached/deleted and temporary files removed. Pressing +``Ctrl+C`` a second time shortcuts that procedure, halts the cleanup and +quits the process. + +VirtualBox Vagrant +~~~~~~~~~~~~~~~~~~ + +..
code:: sh + + user@host:~$ sudo -i # become root + root@host:~# git clone https://github.com/andsens/bootstrap-vz.git # Clone the repo + root@host:~# apt-get install qemu-utils debootstrap python-pip # Install dependencies from aptitude + root@host:~# pip install termcolor jsonschema fysom docopt pyyaml # Install python dependencies + root@host:~# bootstrap-vz/bootstrap-vz bootstrap-vz/manifests/virtualbox-vagrant.manifest.yml + +If you want to use the `minimize\_size `__ plugin, +you will have to install the ``zerofree`` package and `VMWare Workstation`__ as well. + +__ https://my.vmware.com/web/vmware/info/slug/desktop_end_user_computing/vmware_workstation/10_0 + +Amazon EC2 EBS backed AMI +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code:: sh + + user@host:~$ sudo -i # become root + root@host:~# git clone https://github.com/andsens/bootstrap-vz.git # Clone the repo + root@host:~# apt-get install debootstrap python-pip # Install dependencies from aptitude + root@host:~# pip install termcolor jsonschema fysom docopt pyyaml boto # Install python dependencies + root@host:~# bootstrap-vz/bootstrap-vz bootstrap-vz/manifests/ec2-ebs-debian-official-amd64-pvm.manifest.yml + +To bootstrap S3 backed AMIs, bootstrap-vz will also need the +``euca2ools`` package. However, version 3.2.0 is required, meaning you +must install it directly from the eucalyptus repository like +this: + +.. code:: sh + + apt-get install --no-install-recommends python-dev libxml2-dev libxslt-dev gcc + pip install git+git://github.com/eucalyptus/euca2ools.git@v3.2.0 + +Cleanup +------- + +bootstrap-vz tries very hard to clean up after itself, both when a run is +successful and when it fails. This ensures that you are not left +with useless volumes still attached to the host. If an error +occurs, you can simply correct the problem that caused it and rerun +everything; there will be no leftovers from the previous run (as always +there are of course rare/unlikely exceptions to that rule). The error +messages should always give you a strong hint at what is wrong; if that +is not the case, please consider `opening an issue`__ and attach +both the error message and your manifest (preferably as a gist or +similar). + +__ https://github.com/andsens/bootstrap-vz/issues + +Dependencies +------------ + +bootstrap-vz has a number of dependencies depending on the target +platform and `the selected plugins `__. +At a bare minimum, the following python libraries are needed: + +* `termcolor `__ +* `fysom `__ +* `jsonschema `__ +* `docopt `__ +* `pyyaml `__ + +To bootstrap Debian itself, `debootstrap`__ is needed as well. + +__ https://packages.debian.org/wheezy/debootstrap + +Any other requirements are dependent upon the manifest configuration +and are detailed in the corresponding sections of the documentation. +bootstrap-vz will, however, warn you if a requirement has not been met +before the bootstrapping process begins. + +Developers +---------- + +The API documentation, development guidelines and an explanation of +bootstrap-vz internals can be found at `bootstrap-vz.readthedocs.org`__. + +__ http://bootstrap-vz.readthedocs.org/en/master/developers + +Contributing +------------ + +Contribution guidelines are described in the documentation under `Contributing `__. +There's also a topic regarding `the coding style `__.
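For orientation when reading the manifest-related changes further down, here is a rough, hypothetical sketch of what a minimal VirtualBox manifest might look like. It is pieced together only from settings this diff actually references (``system.release``, ``system.bootloader``, ``volume.backing``, ``volume.partitions``, ``packages.mirror``); the ``provider`` and ``bootstrapper`` keys and the concrete values are assumptions, and the authoritative schema is the ``manifest-schema.yml`` loaded in ``bootstrapvz/base/__init__.py``.

.. code:: yaml

    ---
    provider:
      name: virtualbox          # assumption: the changelog only says "provider" became its own section
    bootstrapper:
      workspace: /target        # assumption: path borrowed from the CONTRIBUTING guideline example
    system:
      release: jessie           # checked against bootstrapvz.common.releases in validate_manifest()
      bootloader: grub          # grub with partitions type 'none' is rejected by validate_manifest()
    volume:
      backing: vdi              # mapped to VirtualDiskImage in bootstrapvz/base/fs/__init__.py
      partitions:
        type: msdos             # 'none', 'gpt' or 'msdos'
        root:
          filesystem: ext4
          size: 4GiB            # parsed into a Sectors value using the volume's 512B sector size
    packages:
      mirror: http://http.debian.net/debian   # optional; this default appears in bootstrapinfo.py

Such a file would be passed straight to the executable, as in the quickstart above, e.g. ``bootstrap-vz manifests/virtualbox-vagrant.manifest.yml``.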
diff --git a/bootstrap-vz b/bootstrap-vz index 46f8fbf..de62f32 100755 --- a/bootstrap-vz +++ b/bootstrap-vz @@ -1,5 +1,5 @@ #!/usr/bin/env python if __name__ == '__main__': - from bootstrapvz.base import main + from bootstrapvz.base.main import main main() diff --git a/bootstrap-vz-remote b/bootstrap-vz-remote new file mode 100755 index 0000000..d7b7254 --- /dev/null +++ b/bootstrap-vz-remote @@ -0,0 +1,5 @@ +#!/usr/bin/env python + +if __name__ == '__main__': + from bootstrapvz.remote.main import main + main() diff --git a/bootstrap-vz-server b/bootstrap-vz-server new file mode 100755 index 0000000..bf941a0 --- /dev/null +++ b/bootstrap-vz-server @@ -0,0 +1,5 @@ +#!/usr/bin/env python + +if __name__ == '__main__': + from bootstrapvz.remote.server import main + main() diff --git a/docs/howitworks.rst b/bootstrapvz/README.rst similarity index 81% rename from docs/howitworks.rst rename to bootstrapvz/README.rst index 7d600cd..2ca112d 100644 --- a/docs/howitworks.rst +++ b/bootstrapvz/README.rst @@ -1,6 +1,5 @@ - How bootstrap-vz works -====================== +---------------------- Tasks ~~~~~ @@ -15,14 +14,14 @@ via attributes. Here is an example: :: class MapPartitions(Task): - description = 'Mapping volume partitions' - phase = phases.volume_preparation - predecessors = [PartitionVolume] - successors = [filesystem.Format] - - @classmethod - def run(cls, info): - info.volume.partition_map.map(info.volume) + description = 'Mapping volume partitions' + phase = phases.volume_preparation + predecessors = [PartitionVolume] + successors = [filesystem.Format] + + @classmethod + def run(cls, info): + info.volume.partition_map.map(info.volume) In this case the attributes define that the task at hand should run after the ``PartitionVolume`` task — i.e. after volume has been @@ -36,7 +35,7 @@ successors. The final task list that will be executed is computed by enumerating all tasks in the package, placing them in the graph and -`sorting them topoligcally `_. +`sorting them topologically `_. Subsequently the list returned is filtered to contain only the tasks the provider and the plugins added to the taskset. diff --git a/bootstrapvz/base/__init__.py b/bootstrapvz/base/__init__.py index 1cafa81..4bcf580 100644 --- a/bootstrapvz/base/__init__.py +++ b/bootstrapvz/base/__init__.py @@ -1,8 +1,9 @@ -__all__ = ['Phase', 'Task', 'main'] from phase import Phase from task import Task from main import main +__all__ = ['Phase', 'Task', 'main'] + def validate_manifest(data, validator, error): """Validates the manifest using the base manifest @@ -12,10 +13,22 @@ def validate_manifest(data, validator, error): :param function error: The function tha raises an error when the validation fails """ import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) + from bootstrapvz.common.releases import get_release + from bootstrapvz.common.releases import squeeze + release = get_release(data['system']['release']) + + if release < squeeze: + error('Only Debian squeeze and later is supported', ['system', 'release']) + # Check the bootloader/partitioning configuration. # Doing this via the schema is a pain and does not output a useful error message. 
- if data['system']['bootloader'] == 'grub' and data['volume']['partitions']['type'] == 'none': + if data['system']['bootloader'] == 'grub': + + if data['volume']['partitions']['type'] == 'none': error('Grub cannot boot from unpartitioned disks', ['system', 'bootloader']) + + if release == squeeze: + error('Grub installation on squeeze is not supported', ['system', 'bootloader']) diff --git a/bootstrapvz/base/bootstrapinfo.py b/bootstrapvz/base/bootstrapinfo.py index 7aba5b0..2990d24 100644 --- a/bootstrapvz/base/bootstrapinfo.py +++ b/bootstrapvz/base/bootstrapinfo.py @@ -31,12 +31,6 @@ class BootstrapInformation(object): # The default apt mirror self.apt_mirror = self.manifest.packages.get('mirror', 'http://http.debian.net/debian') - # Normalize the release codenames so that tasks may query for release codenames rather than - # 'stable', 'unstable' etc. This is useful when handling cases that are specific to a release. - release_codenames_path = os.path.join(os.path.dirname(__file__), 'release-codenames.json') - from bootstrapvz.common.tools import config_get - self.release_codename = config_get(release_codenames_path, [self.manifest.system['release']]) - # Create the manifest_vars dictionary self.manifest_vars = self.__create_manifest_vars(self.manifest, {'apt_mirror': self.apt_mirror}) @@ -81,17 +75,6 @@ class BootstrapInformation(object): :return: The manifest_vars dictionary :rtype: dict """ - class DictClass(dict): - """Tiny extension of dict to allow setting and getting keys via attributes - """ - def __getattr__(self, name): - return self[name] - - def __setattr__(self, name, value): - self[name] = value - - def __delattr__(self, name): - del self[name] def set_manifest_vars(obj, data): """Runs through the manifest and creates DictClasses for every key @@ -127,3 +110,47 @@ class BootstrapInformation(object): # They are added last so that they may override previous variables set_manifest_vars(manifest_vars, additional_vars) return manifest_vars + + def __getstate__(self): + from bootstrapvz.remote import supported_classes + + def can_serialize(obj): + if hasattr(obj, '__class__') and hasattr(obj, '__module__'): + class_name = obj.__module__ + '.' + obj.__class__.__name__ + return class_name in supported_classes or isinstance(obj, (BaseException, Exception)) + return True + + def filter_state(state): + if isinstance(state, dict): + return {key: filter_state(val) for key, val in state.items() if can_serialize(val)} + if isinstance(state, (set, tuple, list, frozenset)): + return type(state)(filter_state(val) for val in state if can_serialize(val)) + return state + + state = filter_state(self.__dict__) + state['__class__'] = self.__module__ + '.' 
+ self.__class__.__name__ + return state + + def __setstate__(self, state): + for key in state: + self.__dict__[key] = state[key] + + +class DictClass(dict): + """Tiny extension of dict to allow setting and getting keys via attributes + """ + def __getattr__(self, name): + return self[name] + + def __setattr__(self, name, value): + self[name] = value + + def __delattr__(self, name): + del self[name] + + def __getstate__(self): + return self.__dict__ + + def __setstate__(self, state): + for key in state: + self[key] = state[key] diff --git a/bootstrapvz/base/fs/__init__.py b/bootstrapvz/base/fs/__init__.py index 6ffe21f..9f7742b 100644 --- a/bootstrapvz/base/fs/__init__.py +++ b/bootstrapvz/base/fs/__init__.py @@ -9,27 +9,33 @@ def load_volume(data, bootloader): :return: The volume that represents all information pertaining to the volume we bootstrap on. :rtype: Volume """ - # Create a mapping between valid partition maps in the manifest and their corresponding classes + # Map valid partition maps in the manifest and their corresponding classes from partitionmaps.gpt import GPTPartitionMap from partitionmaps.msdos import MSDOSPartitionMap from partitionmaps.none import NoPartitions - partition_maps = {'none': NoPartitions, - 'gpt': GPTPartitionMap, - 'msdos': MSDOSPartitionMap, - } - # Instantiate the partition map - partition_map = partition_maps.get(data['partitions']['type'])(data['partitions'], bootloader) + partition_map = {'none': NoPartitions, + 'gpt': GPTPartitionMap, + 'msdos': MSDOSPartitionMap, + }.get(data['partitions']['type']) - # Create a mapping between valid volume backings in the manifest and their corresponding classes + # Map valid volume backings in the manifest and their corresponding classes from bootstrapvz.common.fs.loopbackvolume import LoopbackVolume from bootstrapvz.providers.ec2.ebsvolume import EBSVolume from bootstrapvz.common.fs.virtualdiskimage import VirtualDiskImage from bootstrapvz.common.fs.virtualmachinedisk import VirtualMachineDisk - volume_backings = {'raw': LoopbackVolume, - 's3': LoopbackVolume, - 'vdi': VirtualDiskImage, - 'vmdk': VirtualMachineDisk, - 'ebs': EBSVolume - } + volume_backing = {'raw': LoopbackVolume, + 's3': LoopbackVolume, + 'vdi': VirtualDiskImage, + 'vmdk': VirtualMachineDisk, + 'ebs': EBSVolume + }.get(data['backing']) + + # Instantiate the partition map + from bootstrapvz.common.bytes import Bytes + # Only operate with a physical sector size of 512 bytes for now, + # not sure if we can change that for some of the virtual disks + sector_size = Bytes('512B') + partition_map = partition_map(data['partitions'], sector_size, bootloader) + # Create the volume with the partition map as an argument - return volume_backings.get(data['backing'])(partition_map) + return volume_backing(partition_map) diff --git a/bootstrapvz/base/fs/partitionmaps/abstract.py b/bootstrapvz/base/fs/partitionmaps/abstract.py index 43f9e79..cde7c2f 100644 --- a/bootstrapvz/base/fs/partitionmaps/abstract.py +++ b/bootstrapvz/base/fs/partitionmaps/abstract.py @@ -37,7 +37,7 @@ class AbstractPartitionMap(FSMProxy): """Returns the total size the partitions occupy :return: The size of all partitions - :rtype: Bytes + :rtype: Sectors """ # We just need the endpoint of the last partition return self.partitions[-1].get_end() @@ -74,6 +74,7 @@ class AbstractPartitionMap(FSMProxy): '{device_path} (?P\d+)$' .format(device_path=volume.device_path)) log_check_call(['kpartx', '-as', volume.device_path]) + import os.path # Run through the kpartx output and map the paths to 
the partitions for mapping in mappings: @@ -87,15 +88,15 @@ class AbstractPartitionMap(FSMProxy): # Check if any partition was not mapped for idx, partition in enumerate(self.partitions): if partition.fsm.current not in ['mapped', 'formatted']: - raise PartitionError('kpartx did not map partition #' + str(idx + 1)) + raise PartitionError('kpartx did not map partition #' + str(partition.get_index())) - except PartitionError as e: + except PartitionError: # Revert any mapping and reraise the error for partition in self.partitions: - if not partition.fsm.can('unmap'): + if partition.fsm.can('unmap'): partition.unmap() log_check_call(['kpartx', '-ds', volume.device_path]) - raise e + raise def unmap(self, volume): """Unmaps the partition diff --git a/bootstrapvz/base/fs/partitionmaps/gpt.py b/bootstrapvz/base/fs/partitionmaps/gpt.py index 8472338..44f8385 100644 --- a/bootstrapvz/base/fs/partitionmaps/gpt.py +++ b/bootstrapvz/base/fs/partitionmaps/gpt.py @@ -8,12 +8,14 @@ class GPTPartitionMap(AbstractPartitionMap): """Represents a GPT partition map """ - def __init__(self, data, bootloader): + def __init__(self, data, sector_size, bootloader): """ :param dict data: volume.partitions part of the manifest + :param int sector_size: Sectorsize of the volume :param str bootloader: Name of the bootloader we will use for bootstrapping """ - from bootstrapvz.common.bytes import Bytes + from bootstrapvz.common.sectors import Sectors + # List of partitions self.partitions = [] @@ -21,42 +23,63 @@ class GPTPartitionMap(AbstractPartitionMap): def last_partition(): return self.partitions[-1] if len(self.partitions) > 0 else None - # If we are using the grub bootloader we need to create an unformatted partition - # at the beginning of the map. Its size is 1007kb, which we will steal from the - # next partition. if bootloader == 'grub': + # If we are using the grub bootloader we need to create an unformatted partition + # at the beginning of the map. Its size is 1007kb, which seems to be chosen so that + # primary gpt + grub = 1024KiB + # The 1 MiB will be subtracted later on, once we know what the subsequent partition is from ..partitions.unformatted import UnformattedPartition - self.grub_boot = UnformattedPartition(Bytes('1007KiB'), last_partition()) - # Mark the partition as a bios_grub partition - self.grub_boot.flags.append('bios_grub') + self.grub_boot = UnformattedPartition(Sectors('1MiB', sector_size), last_partition()) self.partitions.append(self.grub_boot) + # Offset all partitions by 1 sector. + # parted in jessie has changed and no longer allows + # partitions to be right next to each other. 
+ partition_gap = Sectors(1, sector_size) + # The boot and swap partitions are optional if 'boot' in data: - self.boot = GPTPartition(Bytes(data['boot']['size']), + self.boot = GPTPartition(Sectors(data['boot']['size'], sector_size), data['boot']['filesystem'], data['boot'].get('format_command', None), 'boot', last_partition()) + if self.boot.previous is not None: + # No need to pad if this is the first partition + self.boot.pad_start += partition_gap + self.boot.size -= partition_gap self.partitions.append(self.boot) + if 'swap' in data: - self.swap = GPTSwapPartition(Bytes(data['swap']['size']), last_partition()) + self.swap = GPTSwapPartition(Sectors(data['swap']['size'], sector_size), last_partition()) + if self.swap.previous is not None: + self.swap.pad_start += partition_gap + self.swap.size -= partition_gap self.partitions.append(self.swap) - self.root = GPTPartition(Bytes(data['root']['size']), + + self.root = GPTPartition(Sectors(data['root']['size'], sector_size), data['root']['filesystem'], data['root'].get('format_command', None), 'root', last_partition()) + if self.root.previous is not None: + self.root.pad_start += partition_gap + self.root.size -= partition_gap self.partitions.append(self.root) - # We need to move the first partition to make space for the gpt offset - gpt_offset = Bytes('17KiB') - self.partitions[0].offset += gpt_offset - if hasattr(self, 'grub_boot'): - # grub_boot should not increase the size of the volume, - # so we reduce the size of the succeeding partition. - # gpt_offset is included here, because of the offset we added above (grub_boot is partition[0]) - self.partitions[1].size -= self.grub_boot.get_end() + # Mark the grub partition as a bios_grub partition + self.grub_boot.flags.append('bios_grub') + # Subtract the grub partition size from the subsequent partition + self.partitions[1].size -= self.grub_boot.size else: - # Avoid increasing the volume size because of gpt_offset - self.partitions[0].size -= gpt_offset + # Not using grub, mark the boot partition or root as bootable + getattr(self, 'boot', self.root).flags.append('legacy_boot') + + # The first and last 34 sectors are reserved for the primary/secondary GPT + primary_gpt_size = Sectors(34, sector_size) + self.partitions[0].pad_start += primary_gpt_size + self.partitions[0].size -= primary_gpt_size + + secondary_gpt_size = Sectors(34, sector_size) + self.partitions[-1].pad_end += secondary_gpt_size + self.partitions[-1].size -= secondary_gpt_size super(GPTPartitionMap, self).__init__(bootloader) diff --git a/bootstrapvz/base/fs/partitionmaps/msdos.py b/bootstrapvz/base/fs/partitionmaps/msdos.py index 1b726a5..6c0d25d 100644 --- a/bootstrapvz/base/fs/partitionmaps/msdos.py +++ b/bootstrapvz/base/fs/partitionmaps/msdos.py @@ -9,12 +9,14 @@ class MSDOSPartitionMap(AbstractPartitionMap): Sometimes also called MBR (but that confuses the hell out of me, so ms-dos it is) """ - def __init__(self, data, bootloader): + def __init__(self, data, sector_size, bootloader): """ :param dict data: volume.partitions part of the manifest + :param int sector_size: Sectorsize of the volume :param str bootloader: Name of the bootloader we will use for bootstrapping """ - from bootstrapvz.common.bytes import Bytes + from bootstrapvz.common.sectors import Sectors + # List of partitions self.partitions = [] @@ -24,16 +26,30 @@ class MSDOSPartitionMap(AbstractPartitionMap): # The boot and swap partitions are optional if 'boot' in data: - self.boot = MSDOSPartition(Bytes(data['boot']['size']), + self.boot = 
MSDOSPartition(Sectors(data['boot']['size'], sector_size), data['boot']['filesystem'], data['boot'].get('format_command', None), last_partition()) self.partitions.append(self.boot) + + # Offset all partitions by 1 sector. + # parted in jessie has changed and no longer allows + # partitions to be right next to each other. + partition_gap = Sectors(1, sector_size) + if 'swap' in data: - self.swap = MSDOSSwapPartition(Bytes(data['swap']['size']), last_partition()) + self.swap = MSDOSSwapPartition(Sectors(data['swap']['size'], sector_size), last_partition()) + if self.swap.previous is not None: + # No need to pad if this is the first partition + self.swap.pad_start += partition_gap + self.swap.size -= partition_gap self.partitions.append(self.swap) - self.root = MSDOSPartition(Bytes(data['root']['size']), + + self.root = MSDOSPartition(Sectors(data['root']['size'], sector_size), data['root']['filesystem'], data['root'].get('format_command', None), last_partition()) + if self.root.previous is not None: + self.root.pad_start += partition_gap + self.root.size -= partition_gap self.partitions.append(self.root) # Mark boot as the boot partition, or root, if boot does not exist @@ -44,12 +60,18 @@ class MSDOSPartitionMap(AbstractPartitionMap): # The MBR offset is included in the grub offset, so if we don't use grub # we should reduce the size of the first partition and move it by only 512 bytes. if bootloader == 'grub': - offset = Bytes('2MiB') + mbr_offset = Sectors('2MiB', sector_size) else: - offset = Bytes('512B') + mbr_offset = Sectors('512B', sector_size) - self.partitions[0].offset += offset - self.partitions[0].size -= offset + self.partitions[0].pad_start += mbr_offset + self.partitions[0].size -= mbr_offset + + # Leave the last sector unformatted + # parted in jessie thinks that a partition 10 sectors in size + # goes from sector 0 to sector 9 (instead of 0 to 10) + self.partitions[-1].pad_end += 1 + self.partitions[-1].size -= 1 super(MSDOSPartitionMap, self).__init__(bootloader) diff --git a/bootstrapvz/base/fs/partitionmaps/none.py b/bootstrapvz/base/fs/partitionmaps/none.py index d9b122a..944f8a5 100644 --- a/bootstrapvz/base/fs/partitionmaps/none.py +++ b/bootstrapvz/base/fs/partitionmaps/none.py @@ -7,14 +7,16 @@ class NoPartitions(object): simply always deal with partition maps and then let the base abstract that away. """ - def __init__(self, data, bootloader): + def __init__(self, data, sector_size, bootloader): """ :param dict data: volume.partitions part of the manifest + :param int sector_size: Sectorsize of the volume :param str bootloader: Name of the bootloader we will use for bootstrapping """ - from bootstrapvz.common.bytes import Bytes + from bootstrapvz.common.sectors import Sectors + # In the NoPartitions partitions map we only have a single 'partition' - self.root = SinglePartition(Bytes(data['root']['size']), + self.root = SinglePartition(Sectors(data['root']['size'], sector_size), data['root']['filesystem'], data['root'].get('format_command', None)) self.partitions = [self.root] @@ -29,6 +31,15 @@ class NoPartitions(object): """Returns the total size the partitions occupy :return: The size of all the partitions - :rtype: Bytes + :rtype: Sectors """ return self.root.get_end() + + def __getstate__(self): + state = self.__dict__.copy() + state['__class__'] = self.__module__ + '.' 
+ self.__class__.__name__ + return state + + def __setstate__(self, state): + for key in state: + self.__dict__[key] = state[key] diff --git a/bootstrapvz/base/fs/partitions/abstract.py b/bootstrapvz/base/fs/partitions/abstract.py index 6d3cf48..9d481d7 100644 --- a/bootstrapvz/base/fs/partitions/abstract.py +++ b/bootstrapvz/base/fs/partitions/abstract.py @@ -1,6 +1,6 @@ from abc import ABCMeta from abc import abstractmethod -import os.path +from bootstrapvz.common.sectors import Sectors from bootstrapvz.common.tools import log_check_call from bootstrapvz.common.fsm_proxy import FSMProxy @@ -19,42 +19,6 @@ class AbstractPartition(FSMProxy): {'name': 'unmount', 'src': 'mounted', 'dst': 'formatted'}, ] - class Mount(object): - """Represents a mount into the partition - """ - def __init__(self, source, destination, opts): - """ - :param str,AbstractPartition source: The path from where we mount or a partition - :param str destination: The path of the mountpoint - :param list opts: List of options to pass to the mount command - """ - self.source = source - self.destination = destination - self.opts = opts - - def mount(self, prefix): - """Performs the mount operation or forwards it to another partition - - :param str prefix: Path prefix of the mountpoint - """ - mount_dir = os.path.join(prefix, self.destination) - # If the source is another partition, we tell that partition to mount itself - if isinstance(self.source, AbstractPartition): - self.source.mount(destination=mount_dir) - else: - log_check_call(['mount'] + self.opts + [self.source, mount_dir]) - self.mount_dir = mount_dir - - def unmount(self): - """Performs the unmount operation or asks the partition to unmount itself - """ - # If its a partition, it can unmount itself - if isinstance(self.source, AbstractPartition): - self.source.unmount() - else: - log_check_call(['umount', self.mount_dir]) - del self.mount_dir - def __init__(self, size, filesystem, format_command): """ :param Bytes size: Size of the partition @@ -64,6 +28,9 @@ class AbstractPartition(FSMProxy): self.size = size self.filesystem = filesystem self.format_command = format_command + # Initialize the start & end padding to 0 sectors, may be changed later + self.pad_start = Sectors(0, size.sector_size) + self.pad_end = Sectors(0, size.sector_size) # Path to the partition self.device_path = None # Dictionary with mount points as keys and Mount objects as values @@ -90,9 +57,9 @@ class AbstractPartition(FSMProxy): """Gets the end of the partition :return: The end of the partition - :rtype: Bytes + :rtype: Sectors """ - return self.get_start() + self.size + return self.get_start() + self.pad_start + self.size + self.pad_end def _before_format(self, e): """Formats the partition @@ -143,7 +110,8 @@ class AbstractPartition(FSMProxy): :param list opts: Any options that should be passed to the mount command """ # Create a new mount object, mount it if the partition is mounted and put it in the mounts dict - mount = self.Mount(source, destination, opts) + from mount import Mount + mount = Mount(source, destination, opts) if self.fsm.current == 'mounted': mount.mount(self.mount_dir) self.mounts[destination] = mount diff --git a/bootstrapvz/base/fs/partitions/base.py b/bootstrapvz/base/fs/partitions/base.py index e67e733..df60712 100644 --- a/bootstrapvz/base/fs/partitions/base.py +++ b/bootstrapvz/base/fs/partitions/base.py @@ -1,4 +1,6 @@ +import os from abstract import AbstractPartition +from bootstrapvz.common.sectors import Sectors class BasePartition(AbstractPartition): @@ 
-25,14 +27,13 @@ class BasePartition(AbstractPartition): :param list format_command: Optional format command, valid variables are fs, device_path and size :param BasePartition previous: The partition that preceeds this one """ - # By saving the previous partition we have - # a linked list that partitions can go backwards in to find the first partition. + # By saving the previous partition we have a linked list + # that partitions can go backwards in to find the first partition. self.previous = previous - from bootstrapvz.common.bytes import Bytes - # Initialize the offset to 0 bytes, may be changed later - self.offset = Bytes(0) # List of flags that parted should put on the partition self.flags = [] + # Path to symlink in /dev/disk/by-uuid (manually maintained by this class) + self.disk_by_uuid_path = None super(BasePartition, self).__init__(size, filesystem, format_command) def create(self, volume): @@ -59,30 +60,56 @@ class BasePartition(AbstractPartition): """Gets the starting byte of this partition :return: The starting byte of this partition - :rtype: Bytes + :rtype: Sectors """ if self.previous is None: - # If there is no previous partition, this partition begins at the offset - return self.offset + return Sectors(0, self.size.sector_size) else: - # Get the end of the previous partition and add the offset of this partition - return self.previous.get_end() + self.offset + return self.previous.get_end() def map(self, device_path): """Maps the partition to a device_path - :param str device_path: The device patht his partition should be mapped to + :param str device_path: The device path this partition should be mapped to """ self.fsm.map(device_path=device_path) + def link_uuid(self): + # /lib/udev/rules.d/60-kpartx.rules does not create symlinks in /dev/disk/by-{uuid,label} + # This patch would fix that: http://www.redhat.com/archives/dm-devel/2013-July/msg00080.html + # For now we just do the uuid part ourselves. + # This is mainly to fix a problem in update-grub where /etc/grub.d/10_linux + # checks if the $GRUB_DEVICE_UUID exists in /dev/disk/by-uuid and falls + # back to $GRUB_DEVICE if it doesn't. + # $GRUB_DEVICE is /dev/mapper/xvd{f,g...}# (on ec2), opposed to /dev/xvda# when booting. + # Creating the symlink ensures that grub consistently uses + # $GRUB_DEVICE_UUID when creating /boot/grub/grub.cfg + self.disk_by_uuid_path = os.path.join('/dev/disk/by-uuid', self.get_uuid()) + if not os.path.exists(self.disk_by_uuid_path): + os.symlink(self.device_path, self.disk_by_uuid_path) + + def unlink_uuid(self): + if os.path.isfile(self.disk_by_uuid_path): + os.remove(self.disk_by_uuid_path) + self.disk_by_uuid_path = None + def _before_create(self, e): """Creates the partition """ from bootstrapvz.common.tools import log_check_call - # The create command is failry simple, start and end are just Bytes objects coerced into strings - create_command = ('mkpart primary {start} {end}' - .format(start=str(self.get_start()), - end=str(self.get_end()))) + # The create command is fairly simple: + # - fs_type is the partition filesystem, as defined by parted: + # fs-type can be one of "fat16", "fat32", "ext2", "HFS", "linux-swap", + # "NTFS", "reiserfs", or "ufs". 
+ # - start and end are just Bytes objects coerced into strings + if self.filesystem == 'swap': + fs_type = 'linux-swap' + else: + fs_type = 'ext2' + create_command = ('mkpart primary {fs_type} {start} {end}' + .format(fs_type=fs_type, + start=str(self.get_start() + self.pad_start), + end=str(self.get_end() - self.pad_end))) # Create the partition log_check_call(['parted', '--script', '--align', 'none', e.volume.device_path, '--', create_command]) @@ -96,7 +123,16 @@ class BasePartition(AbstractPartition): def _before_map(self, e): # Set the device path self.device_path = e.device_path + if e.src == 'unmapped_fmt': + # Only link the uuid if the partition is formatted + self.link_uuid() + + def _after_format(self, e): + # We do this after formatting because there otherwise would be no UUID + self.link_uuid() def _before_unmap(self, e): - # When unmapped, the device_path ifnromation becomes invalid, so we delete it + # When unmapped, the device_path information becomes invalid, so we delete it self.device_path = None + if e.src == 'formatted': + self.unlink_uuid() diff --git a/bootstrapvz/base/fs/partitions/mount.py b/bootstrapvz/base/fs/partitions/mount.py new file mode 100644 index 0000000..7ac7e4b --- /dev/null +++ b/bootstrapvz/base/fs/partitions/mount.py @@ -0,0 +1,49 @@ +from abstract import AbstractPartition +import os.path +from bootstrapvz.common.tools import log_check_call + + +class Mount(object): + """Represents a mount into the partition + """ + def __init__(self, source, destination, opts): + """ + :param str,AbstractPartition source: The path from where we mount or a partition + :param str destination: The path of the mountpoint + :param list opts: List of options to pass to the mount command + """ + self.source = source + self.destination = destination + self.opts = opts + + def mount(self, prefix): + """Performs the mount operation or forwards it to another partition + + :param str prefix: Path prefix of the mountpoint + """ + mount_dir = os.path.join(prefix, self.destination) + # If the source is another partition, we tell that partition to mount itself + if isinstance(self.source, AbstractPartition): + self.source.mount(destination=mount_dir) + else: + log_check_call(['mount'] + self.opts + [self.source, mount_dir]) + self.mount_dir = mount_dir + + def unmount(self): + """Performs the unmount operation or asks the partition to unmount itself + """ + # If its a partition, it can unmount itself + if isinstance(self.source, AbstractPartition): + self.source.unmount() + else: + log_check_call(['umount', self.mount_dir]) + del self.mount_dir + + def __getstate__(self): + state = self.__dict__.copy() + state['__class__'] = self.__module__ + '.' 
+ self.__class__.__name__ + return state + + def __setstate__(self, state): + for key in state: + self.__dict__[key] = state[key] diff --git a/bootstrapvz/base/fs/partitions/single.py b/bootstrapvz/base/fs/partitions/single.py index 80683bc..e10b74c 100644 --- a/bootstrapvz/base/fs/partitions/single.py +++ b/bootstrapvz/base/fs/partitions/single.py @@ -9,8 +9,7 @@ class SinglePartition(AbstractPartition): """Gets the starting byte of this partition :return: The starting byte of this partition - :rtype: Bytes + :rtype: Sectors """ - from bootstrapvz.common.bytes import Bytes - # On an unpartitioned volume there is no offset and no previous partition - return Bytes(0) + from bootstrapvz.common.sectors import Sectors + return Sectors(0, self.size.sector_size) diff --git a/bootstrapvz/base/fs/volume.py b/bootstrapvz/base/fs/volume.py index 0a17f61..38d4991 100644 --- a/bootstrapvz/base/fs/volume.py +++ b/bootstrapvz/base/fs/volume.py @@ -65,11 +65,12 @@ class Volume(FSMProxy): def _before_link_dm_node(self, e): """Links the volume using the device mapper - This allows us to create a 'window' into the volume that acts like a volum in itself. + This allows us to create a 'window' into the volume that acts like a volume in itself. Mainly it is used to fool grub into thinking that it is working with a real volume, rather than a loopback device or a network block device. :param _e_obj e: Event object containing arguments to create() + Keyword arguments to link_dm_node() are: :param int logical_start_sector: The sector the volume should start at in the new volume @@ -94,9 +95,9 @@ class Volume(FSMProxy): start_sector = getattr(e, 'start_sector', 0) # The number of sectors that should be mapped - sectors = getattr(e, 'sectors', int(self.size / 512) - start_sector) + sectors = getattr(e, 'sectors', int(self.size) - start_sector) - # This is the table we send to dmsetup, so that it may create a decie mapping for us. + # This is the table we send to dmsetup, so that it may create a device mapping for us. table = ('{log_start_sec} {sectors} linear {major}:{minor} {start_sec}' .format(log_start_sec=logical_start_sector, sectors=sectors, diff --git a/bootstrapvz/base/log.py b/bootstrapvz/base/log.py index 83ff97f..8b01b04 100644 --- a/bootstrapvz/base/log.py +++ b/bootstrapvz/base/log.py @@ -4,6 +4,50 @@ both to a file and to the console. 
import logging +def get_console_handler(debug, colorize): + """Returns a log handler for the console + The handler color codes the different log levels + + :params bool debug: Whether to set the log level to DEBUG (otherwise INFO) + :params bool colorize: Whether to colorize console output + :return: The console logging handler + """ + # Create a console log handler + import sys + console_handler = logging.StreamHandler(sys.stderr) + if colorize: + # We want to colorize the output to the console, so we add a formatter + console_handler.setFormatter(ColorFormatter()) + # Set the log level depending on the debug argument + if debug: + console_handler.setLevel(logging.DEBUG) + else: + console_handler.setLevel(logging.INFO) + return console_handler + + +def get_file_handler(path, debug): + """Returns a log handler for the given path + If the parent directory of the logpath does not exist it will be created + The handler outputs relative timestamps (to when it was created) + + :params str path: The full path to the logfile + :params bool debug: Whether to set the log level to DEBUG (otherwise INFO) + :return: The file logging handler + """ + import os.path + if not os.path.exists(os.path.dirname(path)): + os.makedirs(os.path.dirname(path)) + # Create the log handler + file_handler = logging.FileHandler(path) + # Absolute timestamps are rather useless when bootstrapping, it's much more interesting + # to see how long things take, so we log in a relative format instead + file_handler.setFormatter(FileFormatter('[%(relativeCreated)s] %(levelname)s: %(message)s')) + # The file log handler always logs everything + file_handler.setLevel(logging.DEBUG) + return file_handler + + def get_log_filename(manifest_path): """Returns the path to a logfile given a manifest The logfile name is constructed from the current timestamp and the basename of the manifest @@ -22,42 +66,23 @@ def get_log_filename(manifest_path): return filename -def setup_logger(logfile=None, debug=False): - """Sets up the python logger to log to both a file and the console - - :param str logfile: Path to a logfile - :param bool debug: Whether to log debug output to the console +class SourceFormatter(logging.Formatter): + """Adds a [source] tag to the log message if it exists + The python docs suggest using a LoggingAdapter, but that would mean we'd + have to use it everywhere we log something (and only when called remotely), + which is not feasible. 
""" - root = logging.getLogger() - # Make sure all logging statements are processed by our handlers, they decide the log level - root.setLevel(logging.NOTSET) - # Only enable logging to file if a destination was supplied - if logfile is not None: - # Create a file log handler - file_handler = logging.FileHandler(logfile) - # Absolute timestamps are rather useless when bootstrapping, it's much more interesting - # to see how long things take, so we log in a relative format instead - file_handler.setFormatter(FileFormatter('[%(relativeCreated)s] %(levelname)s: %(message)s')) - # The file log handler always logs everything - file_handler.setLevel(logging.DEBUG) - root.addHandler(file_handler) - - # Create a console log handler - import sys - console_handler = logging.StreamHandler(sys.stderr) - # We want to colorize the output to the console, so we add a formatter - console_handler.setFormatter(ConsoleFormatter()) - # Set the log level depending on the debug argument - if debug: - console_handler.setLevel(logging.DEBUG) - else: - console_handler.setLevel(logging.INFO) - root.addHandler(console_handler) + def format(self, record): + extra = getattr(record, 'extra', {}) + if 'source' in extra: + record.msg = '[{source}] {message}'.format(source=record.extra['source'], + message=record.msg) + return super(SourceFormatter, self).format(record) -class ConsoleFormatter(logging.Formatter): - """Formats log statements for the console +class ColorFormatter(SourceFormatter): + """Colorizes log messages depending on the loglevel """ level_colors = {logging.ERROR: 'red', logging.WARNING: 'magenta', @@ -65,14 +90,13 @@ class ConsoleFormatter(logging.Formatter): } def format(self, record): - if(record.levelno in self.level_colors): - # Colorize the message if we have a color for it (DEBUG has no color) - from termcolor import colored - record.msg = colored(record.msg, self.level_colors[record.levelno]) - return super(ConsoleFormatter, self).format(record) + # Colorize the message if we have a color for it (DEBUG has no color) + from termcolor import colored + record.msg = colored(record.msg, self.level_colors.get(record.levelno, None)) + return super(ColorFormatter, self).format(record) -class FileFormatter(logging.Formatter): +class FileFormatter(SourceFormatter): """Formats log statements for output to file Currently this is just a stub """ diff --git a/bootstrapvz/base/main.py b/bootstrapvz/base/main.py index ec14ea6..74d06aa 100644 --- a/bootstrapvz/base/main.py +++ b/bootstrapvz/base/main.py @@ -1,9 +1,6 @@ """Main module containing all the setup necessary for running the bootstrapping process """ -import logging -log = logging.getLogger(__name__) - def main(): """Main function for invoking the bootstrap process @@ -12,31 +9,30 @@ def main(): """ # Get the commandline arguments opts = get_opts() + # Require root privileges, except when doing a dry-run where they aren't needed import os if os.geteuid() != 0 and not opts['--dry-run']: raise Exception('This program requires root privileges.') - import log - # Log to file unless --log is a single dash - if opts['--log'] != '-': - # Setup logging - if not os.path.exists(opts['--log']): - os.makedirs(opts['--log']) - log_filename = log.get_log_filename(opts['MANIFEST']) - logfile = os.path.join(opts['--log'], log_filename) - else: - logfile = None - log.setup_logger(logfile=logfile, debug=opts['--debug']) + # Set up logging + setup_loggers(opts) + + # Load the manifest + from manifest import Manifest + manifest = Manifest(path=opts['MANIFEST']) # Everything 
has been set up, begin the bootstrapping process - run(opts) + run(manifest, + debug=opts['--debug'], + pause_on_error=opts['--pause-on-error'], + dry_run=opts['--dry-run']) def get_opts(): """Creates an argument parser and returns the arguments it has parsed """ - from docopt import docopt + import docopt usage = """bootstrap-vz Usage: bootstrap-vz [options] MANIFEST @@ -46,22 +42,55 @@ Options: If is `-' file logging will be disabled. --pause-on-error Pause on error, before rollback --dry-run Don't actually run the tasks + --color=auto|always|never + Colorize the console output [default: auto] --debug Print debugging information -h, --help show this help """ - opts = docopt(usage) + opts = docopt.docopt(usage) + if opts['--color'] not in ('auto', 'always', 'never'): + raise docopt.DocoptExit('Value of --color must be one of auto, always or never.') return opts -def run(opts): - """Runs the bootstrapping process +def setup_loggers(opts): + """Sets up the file and console loggers :params dict opts: Dictionary of options from the commandline """ - # Load the manifest - from manifest import Manifest - manifest = Manifest(opts['MANIFEST']) + import logging + root = logging.getLogger() + root.setLevel(logging.NOTSET) + import log + # Log to file unless --log is a single dash + if opts['--log'] != '-': + import os.path + log_filename = log.get_log_filename(opts['MANIFEST']) + logpath = os.path.join(opts['--log'], log_filename) + file_handler = log.get_file_handler(path=logpath, debug=True) + root.addHandler(file_handler) + + if opts['--color'] == 'never': + colorize = False + elif opts['--color'] == 'always': + colorize = True + else: + # If --color=auto (default), decide whether to colorize by whether stderr is a tty. + import os + colorize = os.isatty(2) + console_handler = log.get_console_handler(debug=opts['--debug'], colorize=colorize) + root.addHandler(console_handler) + + +def run(manifest, debug=False, pause_on_error=False, dry_run=False): + """Runs the bootstrapping process + + :params Manifest manifest: The manifest to run the bootstrapping process for + :params bool debug: Whether to turn debugging mode on + :params bool pause_on_error: Whether to pause on error, before rollback + :params bool dry_run: Don't actually run the tasks + """ # Get the tasklist from tasklist import load_tasks from tasklist import TaskList @@ -71,17 +100,19 @@ def run(opts): # Create the bootstrap information object that'll be used throughout the bootstrapping process from bootstrapinfo import BootstrapInformation - bootstrap_info = BootstrapInformation(manifest=manifest, debug=opts['--debug']) + bootstrap_info = BootstrapInformation(manifest=manifest, debug=debug) + import logging + log = logging.getLogger(__name__) try: # Run all the tasks the tasklist has gathered - tasklist.run(info=bootstrap_info, dry_run=opts['--dry-run']) + tasklist.run(info=bootstrap_info, dry_run=dry_run) # We're done! :-) log.info('Successfully completed bootstrapping') except (Exception, KeyboardInterrupt) as e: # When an error occurs, log it and begin rollback log.exception(e) - if opts['--pause-on-error']: + if pause_on_error: # The --pause-on-error is useful when the user wants to inspect the volume before rollback raw_input('Press Enter to commence rollback') log.error('Rolling back') @@ -89,8 +120,8 @@ def run(opts): # Create a useful little function for the provider and plugins to use, # when figuring out what tasks should be added to the rollback list. 
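Since run() now takes a Manifest and returns the BootstrapInformation object instead of reading the docopt dictionary directly, the bootstrapping process can also be driven as a library rather than only through the CLI. A rough sketch, assuming a valid manifest exists at the illustrative path:

    # Rough sketch (manifest path illustrative): driving bootstrap-vz as a library.
    from bootstrapvz.base.manifest import Manifest
    from bootstrapvz.base.main import run

    manifest = Manifest(path='manifests/example.yml')
    info = run(manifest, debug=False, pause_on_error=False, dry_run=True)
    # `info' is the BootstrapInformation object that was shared between the tasks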
def counter_task(taskset, task, counter): - """counter_task() adds the second argument to the rollback tasklist - if the first argument is present in the list of completed tasks + """counter_task() adds the third argument to the rollback tasklist + if the second argument is present in the list of completed tasks :param set taskset: The taskset to add the rollback task to :param Task task: The task to look for in the completed tasks list @@ -105,6 +136,7 @@ def run(opts): rollback_tasklist = TaskList(rollback_tasks) # Run the rollback tasklist - rollback_tasklist.run(info=bootstrap_info, dry_run=opts['--dry-run']) + rollback_tasklist.run(info=bootstrap_info, dry_run=dry_run) log.info('Successfully completed rollback') - raise e + raise + return bootstrap_info diff --git a/bootstrapvz/base/manifest-schema.json b/bootstrapvz/base/manifest-schema.json deleted file mode 100644 index 066c175..0000000 --- a/bootstrapvz/base/manifest-schema.json +++ /dev/null @@ -1,205 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "Generic manifest", - "type": "object", - "properties": { - "provider": { - "type": "string" - }, - "bootstrapper": { - "type": "object", - "properties": { - "workspace": { "$ref": "#/definitions/path" }, - "mirror": { "type": "string", "format": "uri" }, - "tarball": { "type": "boolean" }, - "include_packages": { - "type": "array", - "items": { - "type": "string", - "pattern": "^[^/]+$" - }, - "minItems": 1 - }, - "exclude_packages": { - "type": "array", - "items": { - "type": "string", - "pattern": "^[^/]+$" - }, - "minItems": 1 - } - }, - "required": ["workspace"] - }, - "image": { - "type": "object", - "properties": { - "name": { "type": "string" } - }, - "required": ["name"] - }, - "system": { - "type": "object", - "properties": { - "release": { "enum": ["squeeze", "wheezy", "jessie", "testing", "unstable"] }, - "architecture": { "enum": ["i386", "amd64"] }, - "bootloader": { "enum": ["pvgrub", "grub", "extlinux"] }, - "timezone": { "type": "string" }, - "locale": { "type": "string" }, - "charmap": { "type": "string" }, - "hostname": { - "type": "string", - "pattern": "^\\S+$" - } - }, - "required": ["release", "architecture", "bootloader", "timezone", "locale", "charmap"] - }, - "packages": { - "type": "object", - "properties": { - "mirror": { "type": "string", "format": "uri" }, - "sources": { - "type": "object", - "patternProperties": { - "^[^\/\\0]+$": { - "type": "array", - "items": { - "type": "string", - "pattern": "^(deb|deb-src)\\s+(\\[\\s*(.+\\S)?\\s*\\]\\s+)?\\S+\\s+\\S+(\\s+(.+\\S))?\\s*$" - }, - "minItems": 1 - } - }, - "additionalProperties": false, - "minItems": 1 - }, - "components": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1 - }, - "preferences": { - "type": "object", - "patternProperties": { - "^[^\/\\0]+$": { - "type": "array", - "items": { - "type": "object", - "properties": { - "pin": { - "type": "string" - }, - "package": { - "type": "string" - }, - "pin-priority": { - "type": "integer" - } - }, - "required": ["pin", "package", "pin-priority"], - "additionalProperties": false - }, - "minItems": 1 - } - }, - "additionalProperties": false, - "minItems": 1 - }, - "trusted-keys": { - "type": "array", - "items": { "$ref": "#/definitions/absolute_path" }, - "minItems": 1 - }, - "install": { - "type": "array", - "items": { - "anyOf": [ - { "pattern": "^[^/]+(/[^/]+)?$" }, - { "$ref": "#/definitions/absolute_path" } - ] - }, - "minItems": 1 - }, - "install_standard": { - "type": "boolean" - } - }, - 
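The corrected docstring above describes counter_task(); its body is not part of this hunk, so the following is only a standalone sketch of the behaviour it documents. The real helper keeps the (taskset, task, counter) signature and closes over the tasklist that was just executed; here the completed set is passed explicitly and plain strings stand in for task classes:

    # Standalone sketch of the behaviour described above (not the literal source).
    def counter_task(tasks_completed, taskset, task, counter):
        # schedule `counter' for rollback only if `task' actually ran
        if task in tasks_completed and counter not in tasks_completed:
            taskset.add(counter)

    completed = {'CreateVolume', 'AttachVolume'}   # pretend these tasks have run
    rollback = set()
    counter_task(completed, rollback, 'CreateVolume', 'DeleteVolume')
    counter_task(completed, rollback, 'FormatVolume', 'UnmountVolume')  # FormatVolume never ran
    print(rollback)                                # -> set(['DeleteVolume'])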
"additionalProperties": false - }, - "volume": { - "type": "object", - "properties": { - "backing": { "type": "string" }, - "partitions": { - "type": "object", - "oneOf": [ - { "$ref": "#/definitions/no_partitions" }, - { "$ref": "#/definitions/partition_table" } - ] - } - }, - "required": ["partitions"] - }, - "plugins": { - "type": "object", - "patternProperties": { - "^\\w+$": { - "type": "object" - } - }, - "additionalProperties": false - } - }, - "required": ["provider", "bootstrapper", "system", "volume"], - "definitions": { - "path": { - "type": "string", - "pattern": "^[^\\0]+$" - }, - "absolute_path": { - "type": "string", - "pattern": "^/[^\\0]+$" - }, - "bytes": { - "type": "string", - "pattern": "^\\d+([KMGT]i?B|B)$" - }, - "no_partitions": { - "type": "object", - "properties": { - "type": { "enum": ["none"] }, - "root": { "$ref": "#/definitions/partition" } - }, - "required": ["root"], - "additionalProperties": false - }, - "partition_table": { - "type": "object", - "properties": { - "type": { "enum": ["msdos", "gpt"] }, - "boot": { "$ref": "#/definitions/partition" }, - "root": { "$ref": "#/definitions/partition" }, - "swap": { - "type": "object", - "properties": { "size": { "$ref": "#/definitions/bytes" } }, - "required": ["size"] - } - }, - "required": ["root"], - "additionalProperties": false - }, - "partition": { - "type": "object", - "properties": { - "size": { "$ref": "#/definitions/bytes" }, - "filesystem": { "enum": ["ext2", "ext3", "ext4", "xfs"] }, - "format_command": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1 - } - }, - "required": ["size", "filesystem"] - } - } -} diff --git a/bootstrapvz/base/manifest-schema.yml b/bootstrapvz/base/manifest-schema.yml new file mode 100644 index 0000000..ed4b3f0 --- /dev/null +++ b/bootstrapvz/base/manifest-schema.yml @@ -0,0 +1,176 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: Generic manifest +type: object +required: [provider, bootstrapper, system, volume] +properties: + provider: + type: object + properties: + name: {type: string} + required: [name] + additionalProperties: true + bootstrapper: + type: object + properties: + exclude_packages: + type: array + items: + type: string + pattern: '^[^/]+$' + minItems: 1 + include_packages: + type: array + items: + type: string + pattern: '^[^/]+$' + minItems: 1 + mirror: + type: string + format: uri + tarball: {type: boolean} + workspace: + $ref: '#/definitions/path' + required: [workspace] + additionalProperties: false + image: + type: object + properties: + name: {type: string} + required: [name] + system: + type: object + properties: + architecture: + enum: [i386, amd64] + userspace_architecture: + enum: [i386] + bootloader: + enum: + - pvgrub + - grub + - extlinux + charmap: {type: string} + hostname: + type: string + pattern: ^\S+$ + locale: {type: string} + release: {type: string} + timezone: {type: string} + required: + - release + - architecture + - bootloader + - timezone + - locale + - charmap + additionalProperties: false + packages: + type: object + properties: + components: + type: array + items: {type: string} + minItems: 1 + install: + type: array + items: + anyOf: + - pattern: ^[^/]+(/[^/]+)?$ + - $ref: '#/definitions/absolute_path' + minItems: 1 + install_standard: {type: boolean} + mirror: + type: string + format: uri + preferences: + type: object + patternProperties: + ^[^/\0]+$: + type: array + items: + type: object + properties: + package: {type: string} + pin: {type: string} + pin-priority: {type: integer} + required: 
[pin, package, pin-priority] + additionalProperties: false + minItems: 1 + minItems: 1 + additionalProperties: false + sources: + type: object + patternProperties: + ^[^/\0]+$: + items: + type: string + pattern: ^(deb|deb-src)\s+(\[\s*(.+\S)?\s*\]\s+)?\S+\s+\S+(\s+(.+\S))?\s*$ + minItems: 1 + type: array + minItems: 1 + additionalProperties: false + trusted-keys: + type: array + items: + $ref: '#/definitions/absolute_path' + minItems: 1 + include-source-type: {type: boolean} + additionalProperties: false + plugins: + type: object + patternProperties: + ^\w+$: {type: object} + volume: + type: object + properties: + backing: {type: string} + partitions: + type: object + oneOf: + - $ref: '#/definitions/no_partitions' + - $ref: '#/definitions/partition_table' + required: [partitions] + additionalProperties: false +definitions: + absolute_path: + type: string + pattern: ^/[^\0]+$ + bytes: + pattern: ^\d+([KMGT]i?B|B)$ + type: string + no_partitions: + type: object + properties: + root: {$ref: '#/definitions/partition'} + type: {enum: [none]} + required: [root] + additionalProperties: false + partition: + type: object + properties: + filesystem: + enum: [ext2, ext3, ext4, xfs] + format_command: + items: {type: string} + minItems: 1 + type: array + size: {$ref: '#/definitions/bytes'} + required: [size, filesystem] + additionalProperties: false + partition_table: + type: object + properties: + boot: {$ref: '#/definitions/partition'} + root: {$ref: '#/definitions/partition'} + swap: + type: object + properties: + size: {$ref: '#/definitions/bytes'} + required: [size] + type: {enum: [msdos, gpt]} + required: [root] + additionalProperties: false + path: + type: string + pattern: ^[^\0]+$ diff --git a/bootstrapvz/base/manifest.py b/bootstrapvz/base/manifest.py index e1d6d95..2e04d42 100644 --- a/bootstrapvz/base/manifest.py +++ b/bootstrapvz/base/manifest.py @@ -2,8 +2,8 @@ to determine which tasks should be added to the tasklist, what arguments various invocations should have etc.. """ -from bootstrapvz.common.tools import load_json -from bootstrapvz.common.tools import load_yaml +from bootstrapvz.common.exceptions import ManifestError +from bootstrapvz.common.tools import load_data import logging log = logging.getLogger(__name__) @@ -15,31 +15,47 @@ class Manifest(object): Currently, immutability is not enforced and it would require a fair amount of code to enforce it, instead we just rely on tasks behaving properly. """ - def __init__(self, path): - """Initializer: Given a path we load, validate and parse the manifest. - :param str path: The path to the manifest + def __init__(self, path=None, data=None): + """Initializer: Given a path we load, validate and parse the manifest. + To create the manifest from dynamic data instead of the contents of a file, + provide a properly constructed dict as the data argument. + + :param str path: The path to the manifest (ignored, when `data' is provided) + :param str data: The manifest data, if it is not None, it will be used instead of the contents of `path' """ + if path is None and data is None: + raise ManifestError('`path\' or `data\' must be provided') self.path = path - self.load() + self.load(data) + self.initialize() self.validate() self.parse() - def load(self): - """Loads the manifest. - This function not only reads the manifest but also loads the specified provider and plugins. - Once they are loaded, the initialize() function is called on each of them (if it exists). 
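Two things are worth noting from the schema and manifest changes above: the provider section is now an object with a required name key rather than a plain string, and a Manifest can be constructed from in-memory data instead of a file. A rough sketch combining both; the values are illustrative and the provider's own validation will usually demand more (an image section, for instance), so treat this as a shape sketch rather than a ready-to-run manifest:

    # Rough sketch (illustrative values, not a complete production manifest):
    from bootstrapvz.base.manifest import Manifest

    data = {
        'provider': {'name': 'virtualbox'},   # an object with a `name' key, no longer a string
        'bootstrapper': {'workspace': '/target'},
        'system': {'release': 'jessie',
                   'architecture': 'amd64',
                   'bootloader': 'extlinux',
                   'timezone': 'UTC',
                   'locale': 'en_US',
                   'charmap': 'UTF-8'},
        'volume': {'backing': 'raw',
                   'partitions': {'type': 'none',
                                  'root': {'size': '4GiB', 'filesystem': 'ext4'}}},
    }
    # `data' takes precedence over `path'; validation and parsing run exactly as for a file
    manifest = Manifest(data=data)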
+ def load(self, data=None): + """Loads the manifest and performs a basic validation. + This function reads the manifest and performs some basic validation of + the manifest itself to ensure that the properties required for initalization are accessible + (otherwise the user would be presented with some cryptic error messages). + """ + if data is None: + self.data = load_data(self.path) + else: + self.data = data + + from . import validate_manifest + # Validate the manifest with the base validation function in __init__ + validate_manifest(self.data, self.schema_validator, self.validation_error) + + def initialize(self): + """Initializes the provider and the plugins. + This function loads the specified provider and plugins. + Once the provider and plugins are loaded, + the initialize() function is called on each of them (if it exists). The provider must have an initialize function. """ - # Load the manifest JSON using the loader in common.tools - # It strips comments (which are invalid in strict json) before loading the data. - if self.path.endswith('.json'): - self.data = load_json(self.path) - elif self.path.endswith('.yml') or self.path.endswith('.yaml'): - self.data = load_yaml(self.path) - # Get the provider name from the manifest and load the corresponding module - provider_modname = 'bootstrapvz.providers.' + self.data['provider'] + provider_modname = 'bootstrapvz.providers.' + self.data['provider']['name'] log.debug('Loading provider ' + provider_modname) # Create a modules dict that contains the loaded provider and plugins import importlib @@ -63,12 +79,9 @@ class Manifest(object): init() def validate(self): - """Validates the manifest using the base, provider and plugin validation functions. + """Validates the manifest using the provider and plugin validation functions. Plugins are not required to have a validate_manifest function """ - from . import validate_manifest - # Validate the manifest with the base validation function in __init__ - validate_manifest(self.data, self.schema_validator, self.validation_error) # Run the provider validation self.modules['provider'].validate_manifest(self.data, self.schema_validator, self.validation_error) @@ -90,6 +103,8 @@ class Manifest(object): self.image = self.data['image'] self.volume = self.data['volume'] self.system = self.data['system'] + from bootstrapvz.common.releases import get_release + self.release = get_release(self.system['release']) # The packages and plugins section is not required self.packages = self.data['packages'] if 'packages' in self.data else {} self.plugins = self.data['plugins'] if 'plugins' in self.data else {} @@ -102,19 +117,31 @@ class Manifest(object): :param str schema_path: Path to the json-schema to use for validation """ import jsonschema - schema = load_json(schema_path) + + schema = load_data(schema_path) try: jsonschema.validate(data, schema) except jsonschema.ValidationError as e: self.validation_error(e.message, e.path) - def validation_error(self, message, json_path=None): + def validation_error(self, message, data_path=None): """This function is passed to all validation functions so that they may raise a validation error because a custom validation of the manifest failed. 
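parse() now resolves the release name through the new releases module, so tasks can compare releases directly instead of string-matching codenames. A small sketch of that normalization:

    # Minimal sketch: release names and aliases become comparable objects.
    from bootstrapvz.common.releases import get_release, wheezy, jessie

    release = get_release('oldstable')       # aliases resolve to their codename
    print(str(release))                      # -> 'oldstable'
    print(release == wheezy)                 # -> True
    print(release < jessie)                  # -> True, wheezy predates jessie
    print(get_release('stable') >= jessie)   # -> True

    # Tasks use the same idiom on the manifest, e.g.:
    #   if info.manifest.release < jessie: ...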
:param str message: Message to user about the error - :param list json_path: A path to the location in the manifest where the error occurred + :param list data_path: A path to the location in the manifest where the error occurred :raises ManifestError: With absolute certainty """ - from bootstrapvz.common.exceptions import ManifestError - raise ManifestError(message, self.path, json_path) + raise ManifestError(message, self.path, data_path) + + def __getstate__(self): + return {'__class__': self.__module__ + '.' + self.__class__.__name__, + 'path': self.path, + 'data': self.data} + + def __setstate__(self, state): + self.path = state['path'] + self.load(state['data']) + self.initialize() + self.validate() + self.parse() diff --git a/bootstrapvz/base/pkg/packagelist.py b/bootstrapvz/base/pkg/packagelist.py index 7bd1908..5d5c0c7 100644 --- a/bootstrapvz/base/pkg/packagelist.py +++ b/bootstrapvz/base/pkg/packagelist.py @@ -87,12 +87,10 @@ class PackageList(object): # The package has already been added, skip the checks below return - # Check if the target exists in the sources list, raise a PackageError if not - check_target = target - if check_target is None: - check_target = self.default_target - if not self.source_lists.target_exists(check_target): - msg = ('The target release {target} was not found in the sources list').format(target=check_target) + # Check if the target exists (unless it's the default target) in the sources list + # raise a PackageError if does not + if target not in (None, self.default_target) and not self.source_lists.target_exists(target): + msg = ('The target release {target} was not found in the sources list').format(target=target) raise PackageError(msg) # Note that we maintain the target value even if it is none. diff --git a/bootstrapvz/base/release-codenames.json b/bootstrapvz/base/release-codenames.json deleted file mode 100644 index cac8692..0000000 --- a/bootstrapvz/base/release-codenames.json +++ /dev/null @@ -1,22 +0,0 @@ -{ // This is a mapping of Debian release names to their respective codenames - "unstable": "sid", - "testing": "jessie", - "stable": "wheezy", - "oldstable": "squeeze", - - "jessie": "jessie", - "wheezy": "wheezy", - "squeeze": "squeeze", - - // The following release names are not supported, but included of completeness sake - "lenny": "lenny", - "etch": "etch", - "sarge": "sarge", - "woody": "woody", - "potato": "potato", - "slink": "slink", - "hamm": "hamm", - "bo": "bo", - "rex": "rex", - "buzz": "buzz" -} diff --git a/bootstrapvz/base/tasklist.py b/bootstrapvz/base/tasklist.py index 8e5dbb1..2af2b1d 100644 --- a/bootstrapvz/base/tasklist.py +++ b/bootstrapvz/base/tasklist.py @@ -117,7 +117,8 @@ def get_all_tasks(): # Get a generator that returns all classes in the package import os.path pkg_path = os.path.normpath(os.path.join(os.path.dirname(__file__), '..')) - classes = get_all_classes(pkg_path, 'bootstrapvz.') + exclude_pkgs = ['bootstrapvz.base', 'bootstrapvz.remote'] + classes = get_all_classes(pkg_path, 'bootstrapvz.', exclude_pkgs) # lambda function to check whether a class is a task (excluding the superclass Task) def is_task(obj): @@ -126,11 +127,12 @@ def get_all_tasks(): return filter(is_task, classes) # Only return classes that are tasks -def get_all_classes(path=None, prefix=''): +def get_all_classes(path=None, prefix='', excludes=[]): """ Given a path to a package, this function retrieves all the classes in it :param str path: Path to the package :param str prefix: Name of the package followed by a dot + :param list 
excludes: List of str matching module names that should be ignored :return: A generator that yields classes :rtype: generator :raises Exception: If a module cannot be inspected. @@ -139,10 +141,13 @@ def get_all_classes(path=None, prefix=''): import importlib import inspect - def walk_error(module): - raise Exception('Unable to inspect module ' + module) + def walk_error(module_name): + if not any(map(lambda excl: module_name.startswith(excl), excludes)): + raise Exception('Unable to inspect module ' + module_name) walker = pkgutil.walk_packages([path], prefix, walk_error) for _, module_name, _ in walker: + if any(map(lambda excl: module_name.startswith(excl), excludes)): + continue module = importlib.import_module(module_name) classes = inspect.getmembers(module, inspect.isclass) for class_name, obj in classes: @@ -162,21 +167,31 @@ def check_ordering(task): :raises TaskListError: If there is a conflict between task precedence and phase precedence """ for successor in task.successors: - # Run through all successors and check whether the phase of the task - # comes before the phase of a successor + # Run through all successors and throw an error if the phase of the task + # lies before the phase of a successor, log a warning if it lies after. if task.phase > successor.phase: msg = ("The task {task} is specified as running before {other}, " "but its phase '{phase}' lies after the phase '{other_phase}'" .format(task=task, other=successor, phase=task.phase, other_phase=successor.phase)) raise TaskListError(msg) + if task.phase < successor.phase: + log.warn("The task {task} is specified as running before {other} " + "although its phase '{phase}' already lies before the phase '{other_phase}' " + "(or the task has been placed in the wrong phase)" + .format(task=task, other=successor, phase=task.phase, other_phase=successor.phase)) for predecessor in task.predecessors: - # Run through all predecessors and check whether the phase of the task - # comes after the phase of a predecessor + # Run through all successors and throw an error if the phase of the task + # lies after the phase of a predecessor, log a warning if it lies before. 
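With the check_ordering() change above, an ordering hint that merely restates what the phases already guarantee is downgraded to a warning, while an ordering that contradicts the phases still raises a TaskListError. A rough sketch with invented task names:

    # Rough sketch (invented tasks): warning vs. error in check_ordering().
    from bootstrapvz.base import Task
    from bootstrapvz.base.tasklist import check_ordering
    from bootstrapvz.common import phases

    class TweakConfig(Task):
        description = 'Example task in the system modification phase'
        phase = phases.system_modification

    class WipeLogs(Task):
        description = 'Example task in the system cleaning phase'
        phase = phases.system_cleaning
        predecessors = [TweakConfig]     # redundant: the phases already order these

    check_ordering(WipeLogs)             # only logs a warning

    WipeLogs.predecessors = []
    WipeLogs.successors = [TweakConfig]  # contradicts the phase order
    check_ordering(WipeLogs)             # raises TaskListError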
if task.phase < predecessor.phase: msg = ("The task {task} is specified as running after {other}, " "but its phase '{phase}' lies before the phase '{other_phase}'" .format(task=task, other=predecessor, phase=task.phase, other_phase=predecessor.phase)) raise TaskListError(msg) + if task.phase > predecessor.phase: + log.warn("The task {task} is specified as running after {other} " + "although its phase '{phase}' already lies after the phase '{other_phase}' " + "(or the task has been placed in the wrong phase)" + .format(task=task, other=predecessor, phase=task.phase, other_phase=predecessor.phase)) def strongly_connected_components(graph): diff --git a/bootstrapvz/common/assets/extlinux/boot.txt b/bootstrapvz/common/assets/extlinux/boot.txt new file mode 100644 index 0000000..ddef765 --- /dev/null +++ b/bootstrapvz/common/assets/extlinux/boot.txt @@ -0,0 +1 @@ +Wait 5 seconds or press ENTER to diff --git a/bootstrapvz/common/assets/extlinux/extlinux.conf b/bootstrapvz/common/assets/extlinux/extlinux.conf new file mode 100644 index 0000000..68ce106 --- /dev/null +++ b/bootstrapvz/common/assets/extlinux/extlinux.conf @@ -0,0 +1,17 @@ +default l0 +prompt 1 +timeout 50 + + +label l0 + menu label Debian GNU/Linux, kernel {kernel_version} + linux {boot_prefix}/vmlinuz-{kernel_version} + append initrd={boot_prefix}/initrd.img-{kernel_version} root=UUID={root_uuid} ro quiet console=ttyS0 + +label l0r + menu label Debian GNU/Linux, kernel {kernel_version} (recovery mode) + linux {boot_prefix}/vmlinuz-{kernel_version} + append initrd={boot_prefix}/initrd.img-{kernel_version} root=UUID={root_uuid} ro console=ttyS0 single + text help + This option boots the system into recovery mode (single-user) + endtext diff --git a/bootstrapvz/common/assets/systemd/logind.conf b/bootstrapvz/common/assets/systemd/logind.conf new file mode 100644 index 0000000..b6c4d12 --- /dev/null +++ b/bootstrapvz/common/assets/systemd/logind.conf @@ -0,0 +1,5 @@ + +[Login] +# Disable all TTY getters +NAutoVTs=0 +ReserveVT=0 diff --git a/bootstrapvz/common/bytes.py b/bootstrapvz/common/bytes.py index cd6d9c4..f1e48ba 100644 --- a/bootstrapvz/common/bytes.py +++ b/bootstrapvz/common/bytes.py @@ -1,3 +1,14 @@ +from exceptions import UnitError + + +def onlybytes(msg): + def decorator(func): + def check_other(self, other): + if not isinstance(other, Bytes): + raise UnitError(msg) + return func(self, other) + return check_other + return decorator class Bytes(object): @@ -61,25 +72,45 @@ class Bytes(object): def __long__(self): return self.qty + @onlybytes('Can only compare Bytes to Bytes') + def __lt__(self, other): + return self.qty < other.qty + + @onlybytes('Can only compare Bytes to Bytes') + def __le__(self, other): + return self.qty <= other.qty + + @onlybytes('Can only compare Bytes to Bytes') + def __eq__(self, other): + return self.qty == other.qty + + @onlybytes('Can only compare Bytes to Bytes') + def __ne__(self, other): + return self.qty != other.qty + + @onlybytes('Can only compare Bytes to Bytes') + def __ge__(self, other): + return self.qty >= other.qty + + @onlybytes('Can only compare Bytes to Bytes') + def __gt__(self, other): + return self.qty > other.qty + + @onlybytes('Can only add Bytes to Bytes') def __add__(self, other): - if not isinstance(other, Bytes): - raise UnitError('Can only add Bytes to Bytes') return Bytes(self.qty + other.qty) + @onlybytes('Can only add Bytes to Bytes') def __iadd__(self, other): - if not isinstance(other, Bytes): - raise UnitError('Can only add Bytes to Bytes') self.qty += other.qty 
return self + @onlybytes('Can only subtract Bytes from Bytes') def __sub__(self, other): - if not isinstance(other, Bytes): - raise UnitError('Can only subtract Bytes from Bytes') return Bytes(self.qty - other.qty) + @onlybytes('Can only subtract Bytes from Bytes') def __isub__(self, other): - if not isinstance(other, Bytes): - raise UnitError('Can only subtract Bytes from Bytes') self.qty -= other.qty return self @@ -110,22 +141,19 @@ class Bytes(object): self.qty /= other return self + @onlybytes('Can only take modulus of Bytes with Bytes') def __mod__(self, other): - if isinstance(other, Bytes): - return self.qty % other.qty - if not isinstance(other, (int, long)): - raise UnitError('Can only take modulus of Bytes with integers or Bytes') - return Bytes(self.qty % other) + return Bytes(self.qty % other.qty) + @onlybytes('Can only take modulus of Bytes with Bytes') def __imod__(self, other): - if isinstance(other, Bytes): - self.qty %= other.qty - else: - if not isinstance(other, (int, long)): - raise UnitError('Can only divide Bytes with integers or Bytes') - self.qty %= other + self.qty %= other.qty return self + def __getstate__(self): + return {'__class__': self.__module__ + '.' + self.__class__.__name__, + 'qty': self.qty, + } -class UnitError(Exception): - pass + def __setstate__(self, state): + self.qty = state['qty'] diff --git a/bootstrapvz/common/exceptions.py b/bootstrapvz/common/exceptions.py index 940d2b0..4ea5725 100644 --- a/bootstrapvz/common/exceptions.py +++ b/bootstrapvz/common/exceptions.py @@ -1,22 +1,26 @@ class ManifestError(Exception): - def __init__(self, message, manifest_path, json_path=None): + def __init__(self, message, manifest_path, data_path=None): + super(ManifestError, self).__init__(message) self.message = message self.manifest_path = manifest_path - self.json_path = json_path + self.data_path = data_path + self.args = (self.message, self.manifest_path, self.data_path) def __str__(self): - if self.json_path is not None: - path = '.'.join(map(str, self.json_path)) - return ('{msg}\n File path: {file}\n JSON path: {jsonpath}' - .format(msg=self.message, file=self.manifest_path, jsonpath=path)) + if self.data_path is not None: + path = '.'.join(map(str, self.data_path)) + return ('{msg}\n File path: {file}\n Data path: {datapath}' + .format(msg=self.message, file=self.manifest_path, datapath=path)) return '{file}: {msg}'.format(msg=self.message, file=self.manifest_path) class TaskListError(Exception): def __init__(self, message): + super(TaskListError, self).__init__(message) self.message = message + self.args = (self.message,) def __str__(self): return 'Error in tasklist: ' + self.message @@ -24,3 +28,11 @@ class TaskListError(Exception): class TaskError(Exception): pass + + +class UnexpectedNumMatchesError(Exception): + pass + + +class UnitError(Exception): + pass diff --git a/bootstrapvz/common/fs/__init__.py b/bootstrapvz/common/fs/__init__.py index 694846e..c393ea2 100644 --- a/bootstrapvz/common/fs/__init__.py +++ b/bootstrapvz/common/fs/__init__.py @@ -1,3 +1,4 @@ +from contextlib import contextmanager def get_partitions(): @@ -16,7 +17,8 @@ def get_partitions(): return matches -def remount(volume, fn): +@contextmanager +def unmounted(volume): from bootstrapvz.base.fs.partitionmaps.none import NoPartitions p_map = volume.partition_map @@ -24,9 +26,8 @@ def remount(volume, fn): p_map.root.unmount() if not isinstance(p_map, NoPartitions): p_map.unmap(volume) - result = fn() + yield p_map.map(volume) else: - result = fn() + yield 
p_map.root.mount(destination=root_dir) - return result diff --git a/bootstrapvz/common/fs/loopbackvolume.py b/bootstrapvz/common/fs/loopbackvolume.py index 8f6f7cd..248f3d7 100644 --- a/bootstrapvz/common/fs/loopbackvolume.py +++ b/bootstrapvz/common/fs/loopbackvolume.py @@ -11,8 +11,8 @@ class LoopbackVolume(Volume): def _before_create(self, e): self.image_path = e.image_path - vol_size = str(self.size.get_qty_in('MiB')) + 'M' - log_check_call(['qemu-img', 'create', '-f', 'raw', self.image_path, vol_size]) + size_opt = '--size={mib}M'.format(mib=self.size.bytes.get_qty_in('MiB')) + log_check_call(['truncate', size_opt, self.image_path]) def _before_attach(self, e): [self.loop_device_path] = log_check_call(['losetup', '--show', '--find', self.image_path]) diff --git a/bootstrapvz/common/fs/qemuvolume.py b/bootstrapvz/common/fs/qemuvolume.py index 605e77c..0e8c2f0 100644 --- a/bootstrapvz/common/fs/qemuvolume.py +++ b/bootstrapvz/common/fs/qemuvolume.py @@ -8,7 +8,7 @@ class QEMUVolume(LoopbackVolume): def _before_create(self, e): self.image_path = e.image_path - vol_size = str(self.size.get_qty_in('MiB')) + 'M' + vol_size = str(self.size.bytes.get_qty_in('MiB')) + 'M' log_check_call(['qemu-img', 'create', '-f', self.qemu_format, self.image_path, vol_size]) def _check_nbd_module(self): @@ -23,7 +23,8 @@ class QEMUVolume(LoopbackVolume): num_partitions = len(self.partition_map.partitions) if not self._module_loaded('nbd'): msg = ('The kernel module `nbd\' must be loaded ' - '(`modprobe nbd max_part={num_partitions}\') to attach .{extension} images' + '(run `modprobe nbd max_part={num_partitions}\') ' + 'to attach .{extension} images' .format(num_partitions=num_partitions, extension=self.extension)) raise VolumeError(msg) nbd_max_part = int(self._module_param('nbd', 'max_part')) @@ -76,3 +77,7 @@ class QEMUVolume(LoopbackVolume): if not self._is_nbd_used(device_name): return os.path.join('/dev', device_name) raise VolumeError('Unable to find free nbd device.') + + def __setstate__(self, state): + for key in state: + self.__dict__[key] = state[key] diff --git a/bootstrapvz/common/fsm_proxy.py b/bootstrapvz/common/fsm_proxy.py index bf02c73..a968b92 100644 --- a/bootstrapvz/common/fsm_proxy.py +++ b/bootstrapvz/common/fsm_proxy.py @@ -43,6 +43,19 @@ class FSMProxy(object): if not hasattr(self, event): setattr(self, event, make_proxy(fsm, event)) + def __getstate__(self): + state = {} + for key, value in self.__dict__.iteritems(): + if callable(value) or key == 'fsm': + continue + state[key] = value + state['__class__'] = self.__module__ + '.' 
+ self.__class__.__name__ + return state + + def __setstate__(self, state): + for key in state: + self.__dict__[key] = state[key] + class FSMProxyError(Exception): pass diff --git a/bootstrapvz/common/phases.py b/bootstrapvz/common/phases.py index e83feab..99f39a7 100644 --- a/bootstrapvz/common/phases.py +++ b/bootstrapvz/common/phases.py @@ -7,6 +7,7 @@ volume_mounting = Phase('Volume mounting', 'Mounting bootstrap volume') os_installation = Phase('OS installation', 'Installing the operating system') package_installation = Phase('Package installation', 'Installing software') system_modification = Phase('System modification', 'Modifying configuration files, adding resources, etc.') +user_modification = Phase('User modification', 'Running user specified modifications') system_cleaning = Phase('System cleaning', 'Removing sensitive data, temporary files and other leftovers') volume_unmounting = Phase('Volume unmounting', 'Unmounting the bootstrap volume') image_registration = Phase('Image registration', 'Uploading/Registering with the provider') @@ -19,6 +20,7 @@ order = [preparation, os_installation, package_installation, system_modification, + user_modification, system_cleaning, volume_unmounting, image_registration, diff --git a/bootstrapvz/common/releases.py b/bootstrapvz/common/releases.py new file mode 100644 index 0000000..558b4b1 --- /dev/null +++ b/bootstrapvz/common/releases.py @@ -0,0 +1,68 @@ + + +class _Release(object): + def __init__(self, codename, version): + self.codename = codename + self.version = version + + def __cmp__(self, other): + return self.version - other.version + + def __str__(self): + return self.codename + + def __getstate__(self): + state = self.__dict__.copy() + state['__class__'] = self.__module__ + '.' + self.__class__.__name__ + return state + + def __setstate__(self, state): + for key in state: + self.__dict__[key] = state[key] + + +class _ReleaseAlias(_Release): + def __init__(self, alias, release): + self.alias = alias + self.release = release + super(_ReleaseAlias, self).__init__(self.release.codename, self.release.version) + + def __str__(self): + return self.alias + + +sid = _Release('sid', 10) +stretch = _Release('stretch', 9) +jessie = _Release('jessie', 8) +wheezy = _Release('wheezy', 7) +squeeze = _Release('squeeze', 6.0) +lenny = _Release('lenny', 5.0) +etch = _Release('etch', 4.0) +sarge = _Release('sarge', 3.1) +woody = _Release('woody', 3.0) +potato = _Release('potato', 2.2) +slink = _Release('slink', 2.1) +hamm = _Release('hamm', 2.0) +bo = _Release('bo', 1.3) +rex = _Release('rex', 1.2) +buzz = _Release('buzz', 1.1) + +unstable = _ReleaseAlias('unstable', sid) +testing = _ReleaseAlias('testing', stretch) +stable = _ReleaseAlias('stable', jessie) +oldstable = _ReleaseAlias('oldstable', wheezy) + + +def get_release(release_name): + """Normalizes the release codenames + This allows tasks to query for release codenames rather than 'stable', 'unstable' etc. + """ + from . 
import releases + release = getattr(releases, release_name, None) + if release is None or not isinstance(release, _Release): + raise UnknownReleaseException('The release `{name}\' is unknown'.format(name=release)) + return release + + +class UnknownReleaseException(Exception): + pass diff --git a/bootstrapvz/common/sectors.py b/bootstrapvz/common/sectors.py new file mode 100644 index 0000000..d658140 --- /dev/null +++ b/bootstrapvz/common/sectors.py @@ -0,0 +1,178 @@ +from exceptions import UnitError +from bytes import Bytes + + +def onlysectors(msg): + def decorator(func): + def check_other(self, other): + if not isinstance(other, Sectors): + raise UnitError(msg) + return func(self, other) + return check_other + return decorator + + +class Sectors(object): + + def __init__(self, quantity, sector_size): + if isinstance(sector_size, Bytes): + self.sector_size = sector_size + else: + self.sector_size = Bytes(sector_size) + + if isinstance(quantity, Bytes): + self.bytes = quantity + else: + if isinstance(quantity, (int, long)): + self.bytes = self.sector_size * quantity + else: + self.bytes = Bytes(quantity) + + def get_sectors(self): + return self.bytes / self.sector_size + + def __repr__(self): + return str(self.get_sectors()) + 's' + + def __str__(self): + return self.__repr__() + + def __int__(self): + return self.get_sectors() + + def __long__(self): + return self.get_sectors() + + @onlysectors('Can only compare sectors with sectors') + def __lt__(self, other): + return self.bytes < other.bytes + + @onlysectors('Can only compare sectors with sectors') + def __le__(self, other): + return self.bytes <= other.bytes + + @onlysectors('Can only compare sectors with sectors') + def __eq__(self, other): + return self.bytes == other.bytes + + @onlysectors('Can only compare sectors with sectors') + def __ne__(self, other): + return self.bytes != other.bytes + + @onlysectors('Can only compare sectors with sectors') + def __ge__(self, other): + return self.bytes >= other.bytes + + @onlysectors('Can only compare sectors with sectors') + def __gt__(self, other): + return self.bytes > other.bytes + + def __add__(self, other): + if isinstance(other, (int, long)): + return Sectors(self.bytes + self.sector_size * other, self.sector_size) + if isinstance(other, Bytes): + return Sectors(self.bytes + other, self.sector_size) + if isinstance(other, Sectors): + if self.sector_size != other.sector_size: + raise UnitError('Cannot sum sectors with different sector sizes') + return Sectors(self.bytes + other.bytes, self.sector_size) + raise UnitError('Can only add sectors, bytes or integers to sectors') + + def __iadd__(self, other): + if isinstance(other, (int, long)): + self.bytes += self.sector_size * other + return self + if isinstance(other, Bytes): + self.bytes += other + return self + if isinstance(other, Sectors): + if self.sector_size != other.sector_size: + raise UnitError('Cannot sum sectors with different sector sizes') + self.bytes += other.bytes + return self + raise UnitError('Can only add sectors, bytes or integers to sectors') + + def __sub__(self, other): + if isinstance(other, (int, long)): + return Sectors(self.bytes - self.sector_size * other, self.sector_size) + if isinstance(other, Bytes): + return Sectors(self.bytes - other, self.sector_size) + if isinstance(other, Sectors): + if self.sector_size != other.sector_size: + raise UnitError('Cannot subtract sectors with different sector sizes') + return Sectors(self.bytes - other.bytes, self.sector_size) + raise UnitError('Can only subtract 
sectors, bytes or integers from sectors') + + def __isub__(self, other): + if isinstance(other, (int, long)): + self.bytes -= self.sector_size * other + return self + if isinstance(other, Bytes): + self.bytes -= other + return self + if isinstance(other, Sectors): + if self.sector_size != other.sector_size: + raise UnitError('Cannot subtract sectors with different sector sizes') + self.bytes -= other.bytes + return self + raise UnitError('Can only subtract sectors, bytes or integers from sectors') + + def __mul__(self, other): + if isinstance(other, (int, long)): + return Sectors(self.bytes * other, self.sector_size) + else: + raise UnitError('Can only multiply sectors with integers') + + def __imul__(self, other): + if isinstance(other, (int, long)): + self.bytes *= other + return self + else: + raise UnitError('Can only multiply sectors with integers') + + def __div__(self, other): + if isinstance(other, (int, long)): + return Sectors(self.bytes / other, self.sector_size) + if isinstance(other, Sectors): + if self.sector_size == other.sector_size: + return self.bytes / other.bytes + else: + raise UnitError('Cannot divide sectors with different sector sizes') + raise UnitError('Can only divide sectors with integers or sectors') + + def __idiv__(self, other): + if isinstance(other, (int, long)): + self.bytes /= other + return self + if isinstance(other, Sectors): + if self.sector_size == other.sector_size: + self.bytes /= other.bytes + return self + else: + raise UnitError('Cannot divide sectors with different sector sizes') + raise UnitError('Can only divide sectors with integers or sectors') + + @onlysectors('Can only take modulus of sectors with sectors') + def __mod__(self, other): + if self.sector_size == other.sector_size: + return Sectors(self.bytes % other.bytes, self.sector_size) + else: + raise UnitError('Cannot take modulus of sectors with different sector sizes') + + @onlysectors('Can only take modulus of sectors with sectors') + def __imod__(self, other): + if self.sector_size == other.sector_size: + self.bytes %= other.bytes + return self + else: + raise UnitError('Cannot take modulus of sectors with different sector sizes') + + def __getstate__(self): + return {'__class__': self.__module__ + '.' 
+ self.__class__.__name__, + 'sector_size': self.sector_size, + 'bytes': self.bytes, + } + + def __setstate__(self, state): + self.sector_size = state['sector_size'] + self.bytes = state['bytes'] diff --git a/bootstrapvz/common/task_groups.py b/bootstrapvz/common/task_groups.py index c10c177..ab463f0 100644 --- a/bootstrapvz/common/task_groups.py +++ b/bootstrapvz/common/task_groups.py @@ -1,7 +1,8 @@ from tasks import workspace from tasks import packages from tasks import host -from tasks import boot +from tasks import grub +from tasks import extlinux from tasks import bootstrap from tasks import volume from tasks import loopback @@ -14,6 +15,7 @@ from tasks import locale from tasks import network from tasks import initd from tasks import ssh +from tasks import kernel def get_standard_groups(manifest): @@ -25,12 +27,13 @@ def get_standard_groups(manifest): if 'boot' in manifest.volume['partitions']: group.extend(boot_partition_group) group.extend(mounting_group) + group.extend(kernel_group) group.extend(get_fs_specific_group(manifest)) group.extend(get_network_group(manifest)) group.extend(get_apt_group(manifest)) group.extend(security_group) group.extend(locale_group) - group.extend(bootloader_group.get(manifest.system['bootloader'], [])) + group.extend(get_bootloader_group(manifest)) group.extend(cleanup_group) return group @@ -71,10 +74,16 @@ boot_partition_group = [filesystem.CreateBootMountDir, mounting_group = [filesystem.CreateMountDir, filesystem.MountRoot, filesystem.MountSpecials, + filesystem.CopyMountTable, + filesystem.RemoveMountTable, filesystem.UnmountRoot, filesystem.DeleteMountDir, ] +kernel_group = [kernel.DetermineKernelVersion, + kernel.UpdateInitramfs, + ] + ssh_group = [ssh.AddOpenSSHPackage, ssh.DisableSSHPasswordAuthentication, ssh.DisableSSHDNSLookup, @@ -126,9 +135,25 @@ locale_group = [locale.LocaleBootstrapPackage, ] -bootloader_group = {'grub': [boot.AddGrubPackage, boot.ConfigureGrub, boot.InstallGrub], - 'extlinux': [boot.AddExtlinuxPackage, boot.InstallExtLinux], - } +def get_bootloader_group(manifest): + from bootstrapvz.common.releases import jessie + group = [] + if manifest.system['bootloader'] == 'grub': + group.extend([grub.AddGrubPackage, + grub.ConfigureGrub]) + if manifest.release < jessie: + group.append(grub.InstallGrub_1_99) + else: + group.append(grub.InstallGrub_2) + if manifest.system['bootloader'] == 'extlinux': + group.append(extlinux.AddExtlinuxPackage) + if manifest.release < jessie: + group.extend([extlinux.ConfigureExtlinux, + extlinux.InstallExtlinux]) + else: + group.extend([extlinux.ConfigureExtlinuxJessie, + extlinux.InstallExtlinuxJessie]) + return group def get_fs_specific_group(manifest): diff --git a/bootstrapvz/common/tasks/apt.py b/bootstrapvz/common/tasks/apt.py index 91ae553..a93deb9 100644 --- a/bootstrapvz/common/tasks/apt.py +++ b/bootstrapvz/common/tasks/apt.py @@ -1,7 +1,8 @@ from bootstrapvz.base import Task -from .. 
import phases -from ..tools import log_check_call +from bootstrapvz.common import phases +from bootstrapvz.common.tools import log_check_call import locale +import logging import os @@ -23,14 +24,37 @@ class AddDefaultSources(Task): @classmethod def run(cls, info): + from bootstrapvz.common.releases import sid + include_src = info.manifest.packages.get('include-source-type', False) components = ' '.join(info.manifest.packages.get('components', ['main'])) info.source_lists.add('main', 'deb {apt_mirror} {system.release} ' + components) - info.source_lists.add('main', 'deb-src {apt_mirror} {system.release} ' + components) - if info.release_codename != 'sid': + if include_src: + info.source_lists.add('main', 'deb-src {apt_mirror} {system.release} ' + components) + if info.manifest.release != sid: info.source_lists.add('main', 'deb http://security.debian.org/ {system.release}/updates ' + components) - info.source_lists.add('main', 'deb-src http://security.debian.org/ {system.release}/updates ' + components) + if include_src: + info.source_lists.add('main', 'deb-src http://security.debian.org/ {system.release}/updates ' + components) info.source_lists.add('main', 'deb {apt_mirror} {system.release}-updates ' + components) - info.source_lists.add('main', 'deb-src {apt_mirror} {system.release}-updates ' + components) + if include_src: + info.source_lists.add('main', 'deb-src {apt_mirror} {system.release}-updates ' + components) + + +class AddBackports(Task): + description = 'Adding backports to the apt sources' + phase = phases.preparation + predecessors = [AddDefaultSources] + + @classmethod + def run(cls, info): + from bootstrapvz.common.releases import unstable + if info.source_lists.target_exists('{system.release}-backports'): + msg = ('{system.release}-backports target already exists').format(**info.manifest_vars) + logging.getLogger(__name__).info(msg) + elif info.manifest.release == unstable: + logging.getLogger(__name__).info('There are no backports for sid/unstable') + else: + info.source_lists.add('backports', 'deb {apt_mirror} {system.release}-backports main') + info.source_lists.add('backports', 'deb-src {apt_mirror} {system.release}-backports main') class AddManifestPreferences(Task): @@ -63,6 +87,11 @@ class WriteSources(Task): @classmethod def run(cls, info): + if not info.source_lists.target_exists(info.manifest.system['release']): + import logging + log = logging.getLogger(__name__) + log.warn('No default target has been specified in the sources list, ' + 'installing packages may fail') for name, sources in info.source_lists.sources.iteritems(): if name == 'main': list_path = os.path.join(info.root, 'etc/apt/sources.list') @@ -137,12 +166,11 @@ class AptUpgrade(Task): '--assume-yes']) except CalledProcessError as e: if e.returncode == 100: - import logging msg = ('apt exited with status code 100. ' 'This can sometimes occur when package retrieval times out or a package extraction failed. ' 'apt might succeed if you try bootstrapping again.') logging.getLogger(__name__).warn(msg) - raise e + raise class PurgeUnusedPackages(Task): @@ -153,7 +181,8 @@ class PurgeUnusedPackages(Task): def run(cls, info): log_check_call(['chroot', info.root, 'apt-get', 'autoremove', - '--purge']) + '--purge', + '--assume-yes']) class AptClean(Task): diff --git a/bootstrapvz/common/tasks/boot.py b/bootstrapvz/common/tasks/boot.py index 05dab75..afa257f 100644 --- a/bootstrapvz/common/tasks/boot.py +++ b/bootstrapvz/common/tasks/boot.py @@ -1,21 +1,31 @@ from bootstrapvz.base import Task from .. 
import phases -import apt -import filesystem -from bootstrapvz.base.fs import partitionmaps import os.path +from . import assets + + +class UpdateInitramfs(Task): + description = 'Updating initramfs' + phase = phases.system_modification + + @classmethod + def run(cls, info): + from ..tools import log_check_call + log_check_call(['chroot', info.root, 'update-initramfs', '-u']) class BlackListModules(Task): description = 'Blacklisting kernel modules' phase = phases.system_modification + successors = [UpdateInitramfs] @classmethod def run(cls, info): blacklist_path = os.path.join(info.root, 'etc/modprobe.d/blacklist.conf') with open(blacklist_path, 'a') as blacklist: - blacklist.write(('# disable pc speaker\n' - 'blacklist pcspkr')) + blacklist.write(('# disable pc speaker and floppy\n' + 'blacklist pcspkr\n' + 'blacklist floppy\n')) class DisableGetTTYs(Task): @@ -24,129 +34,19 @@ class DisableGetTTYs(Task): @classmethod def run(cls, info): - from ..tools import sed_i - inittab_path = os.path.join(info.root, 'etc/inittab') - tty1 = '1:2345:respawn:/sbin/getty 38400 tty1' - sed_i(inittab_path, '^' + tty1, '#' + tty1) - ttyx = ':23:respawn:/sbin/getty 38400 tty' - for i in range(2, 7): - i = str(i) - sed_i(inittab_path, '^' + i + ttyx + i, '#' + i + ttyx + i) - - -class AddGrubPackage(Task): - description = 'Adding grub package' - phase = phases.preparation - predecessors = [apt.AddDefaultSources] - - @classmethod - def run(cls, info): - info.packages.add('grub-pc') - - -class ConfigureGrub(Task): - description = 'Configuring grub' - phase = phases.system_modification - predecessors = [filesystem.FStab] - - @classmethod - def run(cls, info): - from bootstrapvz.common.tools import sed_i - grub_def = os.path.join(info.root, 'etc/default/grub') - sed_i(grub_def, '^#GRUB_TERMINAL=console', 'GRUB_TERMINAL=console') - sed_i(grub_def, '^GRUB_CMDLINE_LINUX_DEFAULT="quiet"', - 'GRUB_CMDLINE_LINUX_DEFAULT="console=ttyS0"') - - -class InstallGrub(Task): - description = 'Installing grub' - phase = phases.system_modification - predecessors = [filesystem.FStab] - - @classmethod - def run(cls, info): - from ..fs.loopbackvolume import LoopbackVolume - from ..tools import log_check_call - - boot_dir = os.path.join(info.root, 'boot') - grub_dir = os.path.join(boot_dir, 'grub') - - from ..fs import remount - p_map = info.volume.partition_map - - def link_fn(): - info.volume.link_dm_node() - if isinstance(p_map, partitionmaps.none.NoPartitions): - p_map.root.device_path = info.volume.device_path - - def unlink_fn(): - info.volume.unlink_dm_node() - if isinstance(p_map, partitionmaps.none.NoPartitions): - p_map.root.device_path = info.volume.device_path - - # GRUB cannot deal with installing to loopback devices - # so we fake a real harddisk with dmsetup. 
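The InstallGrub task being deleted in this hunk had to wrap the loopback/dmsetup trickery in hand-rolled link/unlink callbacks passed to remount(); the new unmounted() context manager in bootstrapvz/common/fs (introduced earlier in this diff) expresses the same dance more directly. The following is only a rough sketch of that pattern, not the literal replacement task (the split-out grub module is not part of this excerpt):

    # Rough sketch of the pattern enabled by the new unmounted() context manager;
    # NOT the literal replacement task, just an illustration of the control flow.
    from bootstrapvz.common.fs import unmounted
    from bootstrapvz.common.tools import log_check_call

    def install_grub_on_loopback(info):
        # Temporarily tear the mounts down, map the volume through device-mapper,
        # remount, install grub onto the dm node and undo the mapping afterwards.
        with unmounted(info.volume):
            info.volume.link_dm_node()
        try:
            log_check_call(['chroot', info.root, 'grub-install', info.volume.device_path])
            log_check_call(['chroot', info.root, 'update-grub'])
        finally:
            with unmounted(info.volume):
                info.volume.unlink_dm_node()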
- # Guide here: http://ebroder.net/2009/08/04/installing-grub-onto-a-disk-image/ - if isinstance(info.volume, LoopbackVolume): - remount(info.volume, link_fn) - try: - [device_path] = log_check_call(['readlink', '-f', info.volume.device_path]) - device_map_path = os.path.join(grub_dir, 'device.map') - partition_prefix = 'msdos' - if isinstance(p_map, partitionmaps.gpt.GPTPartitionMap): - partition_prefix = 'gpt' - with open(device_map_path, 'w') as device_map: - device_map.write('(hd0) {device_path}\n'.format(device_path=device_path)) - if not isinstance(p_map, partitionmaps.none.NoPartitions): - for idx, partition in enumerate(info.volume.partition_map.partitions): - device_map.write('(hd0,{prefix}{idx}) {device_path}\n' - .format(device_path=partition.device_path, - prefix=partition_prefix, - idx=idx + 1)) - - # Install grub - log_check_call(['chroot', info.root, - 'grub-install', device_path]) - log_check_call(['chroot', info.root, 'update-grub']) - except Exception as e: - if isinstance(info.volume, LoopbackVolume): - remount(info.volume, unlink_fn) - raise e - - if isinstance(info.volume, LoopbackVolume): - remount(info.volume, unlink_fn) - - -class AddExtlinuxPackage(Task): - description = 'Adding extlinux package' - phase = phases.preparation - predecessors = [apt.AddDefaultSources] - - @classmethod - def run(cls, info): - info.packages.add('extlinux') - if isinstance(info.volume.partition_map, partitionmaps.gpt.GPTPartitionMap): - info.packages.add('syslinux-common') - - -class InstallExtLinux(Task): - description = 'Installing extlinux' - phase = phases.system_modification - predecessors = [filesystem.FStab] - - @classmethod - def run(cls, info): - from ..tools import log_check_call - if isinstance(info.volume.partition_map, partitionmaps.gpt.GPTPartitionMap): - bootloader = '/usr/lib/syslinux/gptmbr.bin' + # Forward compatible check for jessie + from bootstrapvz.common.releases import jessie + if info.manifest.release < jessie: + from ..tools import sed_i + inittab_path = os.path.join(info.root, 'etc/inittab') + tty1 = '1:2345:respawn:/sbin/getty 38400 tty1' + sed_i(inittab_path, '^' + tty1, '#' + tty1) + ttyx = ':23:respawn:/sbin/getty 38400 tty' + for i in range(2, 7): + i = str(i) + sed_i(inittab_path, '^' + i + ttyx + i, '#' + i + ttyx + i) else: - bootloader = '/usr/lib/extlinux/mbr.bin' - log_check_call(['chroot', info.root, - 'dd', 'bs=440', 'count=1', - 'if=' + bootloader, - 'of=' + info.volume.device_path]) - log_check_call(['chroot', info.root, - 'extlinux', - '--install', '/boot/extlinux']) - log_check_call(['chroot', info.root, - 'extlinux-update']) + from shutil import copy + logind_asset_path = os.path.join(assets, 'systemd/logind.conf') + logind_destination = os.path.join(info.root, 'etc/systemd/logind.conf') + copy(logind_asset_path, logind_destination) diff --git a/bootstrapvz/common/tasks/bootstrap.py b/bootstrapvz/common/tasks/bootstrap.py index 5e99248..e656f63 100644 --- a/bootstrapvz/common/tasks/bootstrap.py +++ b/bootstrapvz/common/tasks/bootstrap.py @@ -19,7 +19,8 @@ class AddRequiredCommands(Task): def get_bootstrap_args(info): executable = ['debootstrap'] - options = ['--arch=' + info.manifest.system['architecture']] + arch = info.manifest.system.get('userspace_architecture', info.manifest.system.get('architecture')) + options = ['--arch=' + arch] if len(info.include_packages) > 0: options.append('--include=' + ','.join(info.include_packages)) if len(info.exclude_packages) > 0: @@ -79,7 +80,6 @@ class Bootstrap(Task): class 
IncludePackagesInBootstrap(Task): description = 'Add packages in the bootstrap phase' phase = phases.preparation - successors = [Bootstrap] @classmethod def run(cls, info): @@ -91,7 +91,6 @@ class IncludePackagesInBootstrap(Task): class ExcludePackagesInBootstrap(Task): description = 'Remove packages from bootstrap phase' phase = phases.preparation - successors = [Bootstrap] @classmethod def run(cls, info): diff --git a/bootstrapvz/common/tasks/extlinux.py b/bootstrapvz/common/tasks/extlinux.py new file mode 100644 index 0000000..25da011 --- /dev/null +++ b/bootstrapvz/common/tasks/extlinux.py @@ -0,0 +1,114 @@ +from bootstrapvz.base import Task +from .. import phases +from ..tools import log_check_call +import filesystem +import kernel +from bootstrapvz.base.fs import partitionmaps +import os + + +class AddExtlinuxPackage(Task): + description = 'Adding extlinux package' + phase = phases.preparation + + @classmethod + def run(cls, info): + info.packages.add('extlinux') + if isinstance(info.volume.partition_map, partitionmaps.gpt.GPTPartitionMap): + info.packages.add('syslinux-common') + + +class ConfigureExtlinux(Task): + description = 'Configuring extlinux' + phase = phases.system_modification + predecessors = [filesystem.FStab] + + @classmethod + def run(cls, info): + from bootstrapvz.common.releases import squeeze + if info.manifest.release == squeeze: + # On squeeze /etc/default/extlinux is generated when running extlinux-update + log_check_call(['chroot', info.root, + 'extlinux-update']) + from bootstrapvz.common.tools import sed_i + extlinux_def = os.path.join(info.root, 'etc/default/extlinux') + sed_i(extlinux_def, r'^EXTLINUX_PARAMETERS="([^"]+)"$', + r'EXTLINUX_PARAMETERS="\1 console=ttyS0"') + + +class InstallExtlinux(Task): + description = 'Installing extlinux' + phase = phases.system_modification + predecessors = [filesystem.FStab, ConfigureExtlinux] + + @classmethod + def run(cls, info): + if isinstance(info.volume.partition_map, partitionmaps.gpt.GPTPartitionMap): + bootloader = '/usr/lib/syslinux/gptmbr.bin' + else: + bootloader = '/usr/lib/extlinux/mbr.bin' + log_check_call(['chroot', info.root, + 'dd', 'bs=440', 'count=1', + 'if=' + bootloader, + 'of=' + info.volume.device_path]) + log_check_call(['chroot', info.root, + 'extlinux', + '--install', '/boot/extlinux']) + log_check_call(['chroot', info.root, + 'extlinux-update']) + + +class ConfigureExtlinuxJessie(Task): + description = 'Configuring extlinux' + phase = phases.system_modification + + @classmethod + def run(cls, info): + extlinux_path = os.path.join(info.root, 'boot/extlinux') + os.mkdir(extlinux_path) + + from . 
import assets + with open(os.path.join(assets, 'extlinux/extlinux.conf')) as template: + extlinux_config_tpl = template.read() + + config_vars = {'root_uuid': info.volume.partition_map.root.get_uuid(), + 'kernel_version': info.kernel_version} + # Check if / and /boot are on the same partition + # If not, /boot will actually be / when booting + if hasattr(info.volume.partition_map, 'boot'): + config_vars['boot_prefix'] = '' + else: + config_vars['boot_prefix'] = '/boot' + + extlinux_config = extlinux_config_tpl.format(**config_vars) + + with open(os.path.join(extlinux_path, 'extlinux.conf'), 'w') as extlinux_conf_handle: + extlinux_conf_handle.write(extlinux_config) + + # Copy the boot message + from shutil import copy + boot_txt_path = os.path.join(assets, 'extlinux/boot.txt') + copy(boot_txt_path, os.path.join(extlinux_path, 'boot.txt')) + + +class InstallExtlinuxJessie(Task): + description = 'Installing extlinux' + phase = phases.system_modification + predecessors = [filesystem.FStab, ConfigureExtlinuxJessie] + # Make sure the kernel image is updated after we have installed the bootloader + successors = [kernel.UpdateInitramfs] + + @classmethod + def run(cls, info): + if isinstance(info.volume.partition_map, partitionmaps.gpt.GPTPartitionMap): + # Yeah, somebody saw it fit to uppercase that folder in jessie. Why? BECAUSE + bootloader = '/usr/lib/EXTLINUX/gptmbr.bin' + else: + bootloader = '/usr/lib/EXTLINUX/mbr.bin' + log_check_call(['chroot', info.root, + 'dd', 'bs=440', 'count=1', + 'if=' + bootloader, + 'of=' + info.volume.device_path]) + log_check_call(['chroot', info.root, + 'extlinux', + '--install', '/boot/extlinux']) diff --git a/bootstrapvz/common/tasks/filesystem.py b/bootstrapvz/common/tasks/filesystem.py index 8c4ab8a..9f348e0 100644 --- a/bootstrapvz/common/tasks/filesystem.py +++ b/bootstrapvz/common/tasks/filesystem.py @@ -1,7 +1,6 @@ from bootstrapvz.base import Task from .. 
import phases from ..tools import log_check_call -import apt import bootstrap import host import volume @@ -26,8 +25,9 @@ class Format(Task): def run(cls, info): from bootstrapvz.base.fs.partitions.unformatted import UnformattedPartition for partition in info.volume.partition_map.partitions: - if not isinstance(partition, UnformattedPartition): - partition.format() + if isinstance(partition, UnformattedPartition): + continue + partition.format() class TuneVolumeFS(Task): @@ -41,15 +41,15 @@ class TuneVolumeFS(Task): import re # Disable the time based filesystem check for partition in info.volume.partition_map.partitions: - if not isinstance(partition, UnformattedPartition): - if re.match('^ext[2-4]$', partition.filesystem) is not None: - log_check_call(['tune2fs', '-i', '0', partition.device_path]) + if isinstance(partition, UnformattedPartition): + continue + if re.match('^ext[2-4]$', partition.filesystem) is not None: + log_check_call(['tune2fs', '-i', '0', partition.device_path]) class AddXFSProgs(Task): description = 'Adding `xfsprogs\' to the image packages' phase = phases.preparation - predecessors = [apt.AddDefaultSources] @classmethod def run(cls, info): @@ -113,6 +113,18 @@ class MountSpecials(Task): root.add_mount('none', 'dev/pts', ['--types', 'devpts']) +class CopyMountTable(Task): + description = 'Copying mtab from host system' + phase = phases.os_installation + predecessors = [MountSpecials] + + @classmethod + def run(cls, info): + import shutil + import os.path + shutil.copy('/proc/mounts', os.path.join(info.root, 'etc/mtab')) + + class UnmountRoot(Task): description = 'Unmounting the bootstrap volume' phase = phases.volume_unmounting @@ -123,6 +135,17 @@ class UnmountRoot(Task): info.volume.partition_map.root.unmount() +class RemoveMountTable(Task): + description = 'Removing mtab' + phase = phases.volume_unmounting + successors = [UnmountRoot] + + @classmethod + def run(cls, info): + import os + os.remove(os.path.join(info.root, 'etc/mtab')) + + class DeleteMountDir(Task): description = 'Deleting mountpoint for the bootstrap volume' phase = phases.volume_unmounting diff --git a/bootstrapvz/common/tasks/grub.py b/bootstrapvz/common/tasks/grub.py new file mode 100644 index 0000000..5fe60b2 --- /dev/null +++ b/bootstrapvz/common/tasks/grub.py @@ -0,0 +1,85 @@ +from bootstrapvz.base import Task +from .. import phases +from ..tools import log_check_call +import filesystem +import kernel +from bootstrapvz.base.fs import partitionmaps +import os.path + + +class AddGrubPackage(Task): + description = 'Adding grub package' + phase = phases.preparation + + @classmethod + def run(cls, info): + info.packages.add('grub-pc') + + +class ConfigureGrub(Task): + description = 'Configuring grub' + phase = phases.system_modification + predecessors = [filesystem.FStab] + + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import sed_i + grub_def = os.path.join(info.root, 'etc/default/grub') + sed_i(grub_def, '^#GRUB_TERMINAL=console', 'GRUB_TERMINAL=console') + sed_i(grub_def, '^GRUB_CMDLINE_LINUX_DEFAULT="quiet"', + 'GRUB_CMDLINE_LINUX_DEFAULT="console=ttyS0"') + + +class InstallGrub_1_99(Task): + description = 'Installing grub 1.99' + phase = phases.system_modification + predecessors = [filesystem.FStab] + + @classmethod + def run(cls, info): + p_map = info.volume.partition_map + + # GRUB screws up when installing in chrooted environments + # so we fake a real harddisk with dmsetup. 
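The device.map written just below maps GRUB's BIOS drive names onto the device nodes of the faked disk. For a loopback volume with a two-partition GPT map, the generated file comes out roughly as follows; the /dev/mapper paths are illustrative, the real paths come from readlink -f on the linked device nodes::

    (hd0) /dev/mapper/fake-disk
    (hd0,gpt1) /dev/mapper/fake-disk1
    (hd0,gpt2) /dev/mapper/fake-disk2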
+ # Guide here: http://ebroder.net/2009/08/04/installing-grub-onto-a-disk-image/ + from ..fs import unmounted + with unmounted(info.volume): + info.volume.link_dm_node() + if isinstance(p_map, partitionmaps.none.NoPartitions): + p_map.root.device_path = info.volume.device_path + try: + [device_path] = log_check_call(['readlink', '-f', info.volume.device_path]) + device_map_path = os.path.join(info.root, 'boot/grub/device.map') + partition_prefix = 'msdos' + if isinstance(p_map, partitionmaps.gpt.GPTPartitionMap): + partition_prefix = 'gpt' + with open(device_map_path, 'w') as device_map: + device_map.write('(hd0) {device_path}\n'.format(device_path=device_path)) + if not isinstance(p_map, partitionmaps.none.NoPartitions): + for idx, partition in enumerate(info.volume.partition_map.partitions): + device_map.write('(hd0,{prefix}{idx}) {device_path}\n' + .format(device_path=partition.device_path, + prefix=partition_prefix, + idx=idx + 1)) + + # Install grub + log_check_call(['chroot', info.root, 'grub-install', device_path]) + log_check_call(['chroot', info.root, 'update-grub']) + finally: + with unmounted(info.volume): + info.volume.unlink_dm_node() + if isinstance(p_map, partitionmaps.none.NoPartitions): + p_map.root.device_path = info.volume.device_path + + +class InstallGrub_2(Task): + description = 'Installing grub 2' + phase = phases.system_modification + predecessors = [filesystem.FStab] + # Make sure the kernel image is updated after we have installed the bootloader + successors = [kernel.UpdateInitramfs] + + @classmethod + def run(cls, info): + log_check_call(['chroot', info.root, 'grub-install', info.volume.device_path]) + log_check_call(['chroot', info.root, 'update-grub']) diff --git a/bootstrapvz/common/tasks/initd.py b/bootstrapvz/common/tasks/initd.py index fdc14f5..5e43fa5 100644 --- a/bootstrapvz/common/tasks/initd.py +++ b/bootstrapvz/common/tasks/initd.py @@ -44,8 +44,9 @@ class RemoveHWClock(Task): @classmethod def run(cls, info): + from bootstrapvz.common.releases import squeeze info.initd['disable'].append('hwclock.sh') - if info.manifest.system['release'] == 'squeeze': + if info.manifest.release == squeeze: info.initd['disable'].append('hwclockfirst.sh') @@ -61,4 +62,4 @@ class AdjustExpandRootScript(Task): script = os.path.join(info.root, 'etc/init.d/expand-root') root_idx = info.volume.partition_map.root.get_index() device_path = 'device_path="/dev/xvda{idx}"'.format(idx=root_idx) - sed_i(script, '^device_path="/dev/xvda$', device_path) + sed_i(script, '^device_path="/dev/xvda"$', device_path) diff --git a/bootstrapvz/common/tasks/kernel.py b/bootstrapvz/common/tasks/kernel.py new file mode 100644 index 0000000..5acecf6 --- /dev/null +++ b/bootstrapvz/common/tasks/kernel.py @@ -0,0 +1,52 @@ +from bootstrapvz.base import Task +from .. 
import phases +from ..tasks import packages +import logging + + +class AddDKMSPackages(Task): + description = 'Adding DKMS and kernel header packages' + phase = phases.package_installation + successors = [packages.InstallPackages] + + @classmethod + def run(cls, info): + info.packages.add('dkms') + kernel_pkg_arch = {'i386': '686-pae', 'amd64': 'amd64'}[info.manifest.system['architecture']] + info.packages.add('linux-headers-' + kernel_pkg_arch) + + +class UpdateInitramfs(Task): + description = 'Rebuilding initramfs' + phase = phases.system_modification + + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import log_check_call + # Update initramfs (-u) for all currently installed kernel versions (-k all) + log_check_call(['chroot', info.root, 'update-initramfs', '-u', '-k', 'all']) + + +class DetermineKernelVersion(Task): + description = 'Determining kernel version' + phase = phases.package_installation + predecessors = [packages.InstallPackages] + + @classmethod + def run(cls, info): + # Snatched from `extlinux-update' in wheezy + # list the files in boot/ that match vmlinuz-* + # sort what the * matches, the first entry is the kernel version + import os.path + import re + regexp = re.compile('^vmlinuz-(?P.+)$') + + def get_kernel_version(vmlinuz_path): + vmlinux_basename = os.path.basename(vmlinuz_path) + return regexp.match(vmlinux_basename).group('version') + from glob import glob + boot = os.path.join(info.root, 'boot') + vmlinuz_paths = glob('{boot}/vmlinuz-*'.format(boot=boot)) + kernels = map(get_kernel_version, vmlinuz_paths) + info.kernel_version = sorted(kernels, reverse=True)[0] + logging.getLogger(__name__).debug('Kernel version is {version}'.format(version=info.kernel_version)) diff --git a/bootstrapvz/common/tasks/loopback.py b/bootstrapvz/common/tasks/loopback.py index 8283320..d52b3de 100644 --- a/bootstrapvz/common/tasks/loopback.py +++ b/bootstrapvz/common/tasks/loopback.py @@ -12,12 +12,12 @@ class AddRequiredCommands(Task): @classmethod def run(cls, info): from ..fs.loopbackvolume import LoopbackVolume - if isinstance(info.volume, LoopbackVolume): - info.host_dependencies['qemu-img'] = 'qemu-utils' - info.host_dependencies['losetup'] = 'mount' from ..fs.qemuvolume import QEMUVolume - if isinstance(info.volume, QEMUVolume): + if type(info.volume) is LoopbackVolume: info.host_dependencies['losetup'] = 'mount' + info.host_dependencies['truncate'] = 'coreutils' + if isinstance(info.volume, QEMUVolume): + info.host_dependencies['qemu-img'] = 'qemu-utils' class Create(Task): @@ -45,6 +45,7 @@ class MoveImage(Task): destination = os.path.join(info.manifest.bootstrapper['workspace'], filename) import shutil shutil.move(info.volume.image_path, destination) + info.volume.image_path = destination import logging log = logging.getLogger(__name__) log.info('The volume image has been moved to ' + destination) diff --git a/bootstrapvz/common/tasks/network-configuration.json b/bootstrapvz/common/tasks/network-configuration.json deleted file mode 100644 index 2937384..0000000 --- a/bootstrapvz/common/tasks/network-configuration.json +++ /dev/null @@ -1,14 +0,0 @@ -// This is a mapping of Debian release codenames to NIC configurations -// Every item in an array is a line -{ -"squeeze": ["auto lo", - "iface lo inet loopback", - "auto eth0", - "iface eth0 inet dhcp"], -"wheezy": ["auto eth0", - "iface eth0 inet dhcp"], -"jessie": ["auto eth0", - "iface eth0 inet dhcp"], -"sid": ["auto eth0", - "iface eth0 inet dhcp"] -} diff --git 
a/bootstrapvz/common/tasks/network-configuration.yml b/bootstrapvz/common/tasks/network-configuration.yml new file mode 100644 index 0000000..12e5f03 --- /dev/null +++ b/bootstrapvz/common/tasks/network-configuration.yml @@ -0,0 +1,16 @@ +--- +# This is a mapping of Debian release codenames to NIC configurations +squeeze: | + auto lo + iface lo inet loopback + auto eth0 + iface eth0 inet dhcp +wheezy: | + auto eth0 + iface eth0 inet dhcp +jessie: | + auto eth0 + iface eth0 inet dhcp +sid: | + auto eth0 + iface eth0 inet dhcp diff --git a/bootstrapvz/common/tasks/network.py b/bootstrapvz/common/tasks/network.py index a85a34d..c073867 100644 --- a/bootstrapvz/common/tasks/network.py +++ b/bootstrapvz/common/tasks/network.py @@ -5,7 +5,7 @@ import os class RemoveDNSInfo(Task): description = 'Removing resolv.conf' - phase = phases.system_modification + phase = phases.system_cleaning @classmethod def run(cls, info): @@ -15,7 +15,7 @@ class RemoveDNSInfo(Task): class RemoveHostname(Task): description = 'Removing the hostname file' - phase = phases.system_modification + phase = phases.system_cleaning @classmethod def run(cls, info): @@ -45,10 +45,10 @@ class ConfigureNetworkIF(Task): @classmethod def run(cls, info): - network_config_path = os.path.join(os.path.dirname(__file__), 'network-configuration.json') + network_config_path = os.path.join(os.path.dirname(__file__), 'network-configuration.yml') from ..tools import config_get - if_config = config_get(network_config_path, [info.release_codename]) + if_config = config_get(network_config_path, [info.manifest.release.codename]) interfaces_path = os.path.join(info.root, 'etc/network/interfaces') with open(interfaces_path, 'a') as interfaces: - interfaces.write('\n'.join(if_config) + '\n') + interfaces.write(if_config + '\n') diff --git a/bootstrapvz/common/tasks/packages.py b/bootstrapvz/common/tasks/packages.py index 602a2a0..662ae11 100644 --- a/bootstrapvz/common/tasks/packages.py +++ b/bootstrapvz/common/tasks/packages.py @@ -7,7 +7,6 @@ from ..tools import log_check_call class AddManifestPackages(Task): description = 'Adding packages from the manifest' phase = phases.preparation - predecessors = [apt.AddDefaultSources] @classmethod def run(cls, info): @@ -49,8 +48,8 @@ class InstallPackages(Task): log_check_call(['chroot', info.root, 'apt-get', 'install', '--no-install-recommends', - '--assume-yes'] - + map(str, remote_packages), + '--assume-yes'] + + map(str, remote_packages), env=env) except CalledProcessError as e: import logging @@ -70,7 +69,7 @@ class InstallPackages(Task): 'This can sometimes occur when package retrieval times out or a package extraction failed. ' 'apt might succeed if you try bootstrapping again.') logging.getLogger(__name__).warn(msg) - raise e + raise @classmethod def install_local(cls, info, local_packages): @@ -91,8 +90,7 @@ class InstallPackages(Task): env = os.environ.copy() env['DEBIAN_FRONTEND'] = 'noninteractive' log_check_call(['chroot', info.root, - 'dpkg', '--install'] - + chrooted_package_paths, + 'dpkg', '--install'] + chrooted_package_paths, env=env) for path in absolute_package_paths: diff --git a/bootstrapvz/common/tasks/ssh.py b/bootstrapvz/common/tasks/ssh.py index 242330d..c3f14c5 100644 --- a/bootstrapvz/common/tasks/ssh.py +++ b/bootstrapvz/common/tasks/ssh.py @@ -3,14 +3,12 @@ from .. import phases from ..tools import log_check_call import os.path from . 
import assets -import apt import initd class AddOpenSSHPackage(Task): description = 'Adding openssh package' phase = phases.preparation - predecessors = [apt.AddDefaultSources] @classmethod def run(cls, info): @@ -30,7 +28,8 @@ class AddSSHKeyGeneration(Task): try: log_check_call(['chroot', info.root, 'dpkg-query', '-W', 'openssh-server']) - if info.manifest.system['release'] == 'squeeze': + from bootstrapvz.common.releases import squeeze + if info.manifest.release == squeeze: install['generate-ssh-hostkeys'] = os.path.join(init_scripts_dir, 'squeeze/generate-ssh-hostkeys') else: install['generate-ssh-hostkeys'] = os.path.join(init_scripts_dir, 'generate-ssh-hostkeys') @@ -51,6 +50,38 @@ class DisableSSHPasswordAuthentication(Task): sed_i(sshd_config_path, '^#PasswordAuthentication yes', 'PasswordAuthentication no') +class EnableRootLogin(Task): + description = 'Disabling SSH login for root' + phase = phases.system_modification + + @classmethod + def run(cls, info): + sshdconfig_path = os.path.join(info.root, 'etc/ssh/sshd_config') + if os.path.exists(sshdconfig_path): + from bootstrapvz.common.tools import sed_i + sed_i(sshdconfig_path, 'PermitRootLogin .*', 'PermitRootLogin yes') + else: + import logging + logging.getLogger(__name__).warn('The OpenSSH server has not been installed, ' + 'not enabling SSH root login.') + + +class DisableRootLogin(Task): + description = 'Disabling SSH login for root' + phase = phases.system_modification + + @classmethod + def run(cls, info): + sshdconfig_path = os.path.join(info.root, 'etc/ssh/sshd_config') + if os.path.exists(sshdconfig_path): + from bootstrapvz.common.tools import sed_i + sed_i(sshdconfig_path, 'PermitRootLogin .*', 'PermitRootLogin no') + else: + import logging + logging.getLogger(__name__).warn('The OpenSSH server has not been installed, ' + 'not disabling SSH root login.') + + class DisableSSHDNSLookup(Task): description = 'Disabling sshd remote host name lookup' phase = phases.system_modification @@ -70,7 +101,8 @@ class ShredHostkeys(Task): def run(cls, info): ssh_hostkeys = ['ssh_host_dsa_key', 'ssh_host_rsa_key'] - if info.manifest.system['release'] != 'squeeze': + from bootstrapvz.common.releases import wheezy + if info.manifest.release >= wheezy: ssh_hostkeys.append('ssh_host_ecdsa_key') private = [os.path.join(info.root, 'etc/ssh', name) for name in ssh_hostkeys] diff --git a/bootstrapvz/common/tools.py b/bootstrapvz/common/tools.py index 4b75d3f..5df7ff3 100644 --- a/bootstrapvz/common/tools.py +++ b/bootstrapvz/common/tools.py @@ -1,12 +1,20 @@ -def log_check_call(command, stdin=None, env=None, shell=False): - status, stdout, stderr = log_call(command, stdin, env, shell) +import os + + +def log_check_call(command, stdin=None, env=None, shell=False, cwd=None): + status, stdout, stderr = log_call(command, stdin, env, shell, cwd) + from subprocess import CalledProcessError if status != 0: - from subprocess import CalledProcessError - raise CalledProcessError(status, ' '.join(command), '\n'.join(stderr)) + e = CalledProcessError(status, ' '.join(command), '\n'.join(stderr)) + # Fix Pyro4's fixIronPythonExceptionForPickle() by setting the args property, + # even though we use our own serialization (at least I think that's the problem). + # See bootstrapvz.remote.serialize_called_process_error for more info. 
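The squeeze/wheezy checks above rely on the new release objects from bootstrapvz.common.releases, which compare by release age rather than by codename string. A minimal sketch of that usage; get_release and the release names are the ones imported elsewhere in this patch, and only the comparison operators the patch itself uses are shown:

.. code:: python

    from bootstrapvz.common.releases import get_release, wheezy, jessie

    release = get_release('wheezy')
    # Version-dependent tasks can now say "older than jessie" instead of
    # matching codename strings from the manifest.
    assert release == wheezy
    assert release >= wheezy
    assert release < jessie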
+ setattr(e, 'args', (status, ' '.join(command), '\n'.join(stderr))) + raise e return stdout -def log_call(command, stdin=None, env=None, shell=False): +def log_call(command, stdin=None, env=None, shell=False, cwd=None): import subprocess import logging from multiprocessing.dummy import Pool as ThreadPool @@ -14,9 +22,12 @@ def log_call(command, stdin=None, env=None, shell=False): command_log = realpath(command[0]).replace('/', '.') log = logging.getLogger(__name__ + command_log) - log.debug('Executing: {command}'.format(command=' '.join(command))) + if type(command) is list: + log.debug('Executing: {command}'.format(command=' '.join(command))) + else: + log.debug('Executing: {command}'.format(command=command)) - process = subprocess.Popen(args=command, env=env, shell=shell, + process = subprocess.Popen(args=command, env=env, shell=shell, cwd=cwd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -53,11 +64,26 @@ def log_call(command, stdin=None, env=None, shell=False): return process.returncode, stdout, stderr -def sed_i(file_path, pattern, subst): +def sed_i(file_path, pattern, subst, expected_replacements=1): + replacement_count = inline_replace(file_path, pattern, subst) + if replacement_count != expected_replacements: + from exceptions import UnexpectedNumMatchesError + msg = ('There were {real} instead of {expected} matches for ' + 'the expression `{exp}\' in the file `{path}\'' + .format(real=replacement_count, expected=expected_replacements, + exp=pattern, path=file_path)) + raise UnexpectedNumMatchesError(msg) + + +def inline_replace(file_path, pattern, subst): import fileinput import re + replacement_count = 0 for line in fileinput.input(files=file_path, inplace=True): - print re.sub(pattern, subst, line), + (replacement, count) = re.subn(pattern, subst, line) + replacement_count += count + print replacement, + return replacement_count def load_json(path): @@ -69,12 +95,24 @@ def load_json(path): def load_yaml(path): import yaml - with open(path, 'r') as fobj: - return yaml.safe_load(fobj) + with open(path, 'r') as stream: + return yaml.safe_load(stream) + + +def load_data(path): + filename, extension = os.path.splitext(path) + if not os.path.isfile(path): + raise Exception('The path {path} does not point to a file.'.format(path=path)) + if extension == '.json': + return load_json(path) + elif extension == '.yml' or extension == '.yaml': + return load_yaml(path) + else: + raise Exception('Unrecognized extension: {ext}'.format(ext=extension)) def config_get(path, config_path): - config = load_json(path) + config = load_data(path) for key in config_path: config = config.get(key) return config @@ -82,7 +120,6 @@ def config_get(path, config_path): def copy_tree(from_path, to_path): from shutil import copy - import os for abs_prefix, dirs, files in os.walk(from_path): prefix = os.path.normpath(os.path.relpath(abs_prefix, from_path)) for path in dirs: diff --git a/bootstrapvz/plugins/README.rst b/bootstrapvz/plugins/README.rst new file mode 100644 index 0000000..df46d3c --- /dev/null +++ b/bootstrapvz/plugins/README.rst @@ -0,0 +1,8 @@ +Plugins are a key feature of bootstrap-vz. Despite their small size +(most plugins do not exceed 100 source lines of code) they can modify +the behavior of bootstrapped systems to a great extent. + +Below you will find documentation for all plugins available for +bootstrap-vz. If you cannot find what you are looking for, consider +`developing it yourself `__ and +contribute to this list! 
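Returning to the sed_i rework in common/tools.py above: substitutions are now counted via re.subn, and a count that differs from expected_replacements raises instead of failing silently. A hedged usage sketch follows; the chroot path is made up:

.. code:: python

    from bootstrapvz.common.tools import sed_i

    # Exactly one kernel command line entry is expected in this file;
    # zero or several matches now raise UnexpectedNumMatchesError.
    sed_i('/mnt/chroot/etc/default/grub',
          r'^GRUB_CMDLINE_LINUX_DEFAULT="quiet"$',
          'GRUB_CMDLINE_LINUX_DEFAULT="console=ttyS0"',
          expected_replacements=1)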
diff --git a/bootstrapvz/plugins/admin_user/README.rst b/bootstrapvz/plugins/admin_user/README.rst new file mode 100644 index 0000000..8da68d1 --- /dev/null +++ b/bootstrapvz/plugins/admin_user/README.rst @@ -0,0 +1,12 @@ +Admin user +---------- + +This plugin creates a user with passwordless sudo privileges. It also +disables the SSH root login. If the EC2 init scripts are installed, the +script for fetching the SSH authorized keys will be adjust to match the +username specified. + +Settings +~~~~~~~~ + +- ``username``: The username of the account to create. ``required`` diff --git a/bootstrapvz/plugins/admin_user/__init__.py b/bootstrapvz/plugins/admin_user/__init__.py index 67f3488..11888de 100644 --- a/bootstrapvz/plugins/admin_user/__init__.py +++ b/bootstrapvz/plugins/admin_user/__init__.py @@ -2,18 +2,22 @@ def validate_manifest(data, validator, error): import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) def resolve_tasks(taskset, manifest): import tasks + from bootstrapvz.common.tasks import ssh from bootstrapvz.providers.ec2.tasks import initd if initd.AddEC2InitScripts in taskset: taskset.add(tasks.AdminUserCredentials) + from bootstrapvz.common.releases import jessie + if manifest.release < jessie: + taskset.update([ssh.DisableRootLogin]) + taskset.update([tasks.AddSudoPackage, tasks.CreateAdminUser, tasks.PasswordlessSudo, - tasks.DisableRootLogin, ]) diff --git a/bootstrapvz/plugins/admin_user/manifest-schema.json b/bootstrapvz/plugins/admin_user/manifest-schema.json deleted file mode 100644 index fc3c421..0000000 --- a/bootstrapvz/plugins/admin_user/manifest-schema.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "Admin user plugin manifest", - "type": "object", - "properties": { - "plugins": { - "type": "object", - "properties": { - "admin_user": { - "type": "object", - "properties": { - "username": { - "type": "string" - } - }, - "required": ["username"] - } - } - } - } -} diff --git a/bootstrapvz/plugins/admin_user/manifest-schema.yml b/bootstrapvz/plugins/admin_user/manifest-schema.yml new file mode 100644 index 0000000..02cce0a --- /dev/null +++ b/bootstrapvz/plugins/admin_user/manifest-schema.yml @@ -0,0 +1,14 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: Admin user plugin manifest +type: object +properties: + plugins: + type: object + properties: + admin_user: + type: object + properties: + username: {type: string} + required: [username] + additionalProperties: false diff --git a/bootstrapvz/plugins/admin_user/tasks.py b/bootstrapvz/plugins/admin_user/tasks.py index 5266f27..ae7f221 100644 --- a/bootstrapvz/plugins/admin_user/tasks.py +++ b/bootstrapvz/plugins/admin_user/tasks.py @@ -1,14 +1,12 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases from bootstrapvz.common.tasks.initd import InstallInitScripts -from bootstrapvz.common.tasks import apt import os class AddSudoPackage(Task): description = 'Adding `sudo\' to the image packages' phase = phases.preparation - predecessors = [apt.AddDefaultSources] @classmethod def run(cls, info): @@ -54,23 +52,3 @@ class AdminUserCredentials(Task): getcreds_path = os.path.join(info.root, 'etc/init.d/ec2-get-credentials') username = info.manifest.plugins['admin_user']['username'] sed_i(getcreds_path, 'username=\'root\'', 
'username=\'{username}\''.format(username=username)) - - -class DisableRootLogin(Task): - description = 'Disabling SSH login for root' - phase = phases.system_modification - - @classmethod - def run(cls, info): - from subprocess import CalledProcessError - from bootstrapvz.common.tools import log_check_call - try: - log_check_call(['chroot', info.root, - 'dpkg-query', '-W', 'openssh-server']) - from bootstrapvz.common.tools import sed_i - sshdconfig_path = os.path.join(info.root, 'etc/ssh/sshd_config') - sed_i(sshdconfig_path, 'PermitRootLogin yes', 'PermitRootLogin no') - except CalledProcessError: - import logging - logging.getLogger(__name__).warn('The OpenSSH server has not been installed, ' - 'not disabling SSH root login.') diff --git a/bootstrapvz/plugins/ansible/__init__.py b/bootstrapvz/plugins/ansible/__init__.py new file mode 100644 index 0000000..f060ff3 --- /dev/null +++ b/bootstrapvz/plugins/ansible/__init__.py @@ -0,0 +1,13 @@ +import tasks + + +def validate_manifest(data, validator, error): + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) + + +def resolve_tasks(taskset, manifest): + taskset.add(tasks.AddPackages) + taskset.add(tasks.CheckPlaybookPath) + taskset.add(tasks.RunAnsiblePlaybook) diff --git a/bootstrapvz/plugins/ansible/manifest-schema.yml b/bootstrapvz/plugins/ansible/manifest-schema.yml new file mode 100644 index 0000000..dc99679 --- /dev/null +++ b/bootstrapvz/plugins/ansible/manifest-schema.yml @@ -0,0 +1,29 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: Ansible plugin manifest +type: object +properties: + plugins: + type: object + properties: + ansible: + type: object + properties: + extra_vars: {type: string} + tags: {type: string} + skip_tags: {type: string} + opt_flags: + type: array + flag: {type: string} + minItems: 1 + hosts: + type: array + host: {type: string} + minItems: 1 + playbook: {$ref: '#/definitions/absolute_path'} + required: [playbook] + additionalProperties: false +definitions: + absolute_path: + pattern: ^/[^\0]+$ + type: string diff --git a/bootstrapvz/plugins/ansible/tasks.py b/bootstrapvz/plugins/ansible/tasks.py new file mode 100644 index 0000000..4dd675d --- /dev/null +++ b/bootstrapvz/plugins/ansible/tasks.py @@ -0,0 +1,96 @@ +from bootstrapvz.base import Task +from bootstrapvz.common import phases +import os + + +class CheckPlaybookPath(Task): + description = 'Checking whether the playbook path exist' + phase = phases.preparation + + @classmethod + def run(cls, info): + from bootstrapvz.common.exceptions import TaskError + playbook = info.manifest.plugins['ansible']['playbook'] + if not os.path.exists(playbook): + msg = 'The playbook file {playbook} does not exist.'.format(playbook=playbook) + raise TaskError(msg) + if not os.path.isfile(playbook): + msg = 'The playbook path {playbook} does not point to a file.'.format(playbook=playbook) + raise TaskError(msg) + + +class AddPackages(Task): + description = 'Making sure python is installed' + phase = phases.preparation + + @classmethod + def run(cls, info): + info.packages.add('python') + + +class RunAnsiblePlaybook(Task): + description = 'Running ansible playbooks' + phase = phases.user_modification + + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import log_check_call + + # Extract playbook and directory + playbook = info.manifest.plugins['ansible']['playbook'] + playbook_dir = os.path.dirname(os.path.realpath(playbook)) + + # Check for hosts + 
hosts = None + if 'hosts' in info.manifest.plugins['ansible']: + hosts = info.manifest.plugins['ansible']['hosts'] + + # Check for extra vars + extra_vars = None + if 'extra_vars' in info.manifest.plugins['ansible']: + extra_vars = info.manifest.plugins['ansible']['extra_vars'] + + tags = None + if 'tags' in info.manifest.plugins['ansible']: + tags = info.manifest.plugins['ansible']['tags'] + + skip_tags = None + if 'skip_tags' in info.manifest.plugins['ansible']: + skip_tags = info.manifest.plugins['ansible']['skip_tags'] + + opt_flags = None + if 'opt_flags' in info.manifest.plugins['ansible']: + opt_flags = info.manifest.plugins['ansible']['opt_flags'] + + # build the inventory file + inventory = os.path.join(info.root, 'tmp/bootstrap-inventory') + with open(inventory, 'w') as handle: + conn = '{} ansible_connection=chroot'.format(info.root) + content = "" + + if hosts: + for host in hosts: + content += '[{}]\n{}\n'.format(host, conn) + else: + content = conn + + handle.write(content) + + # build the ansible command + cmd = ['ansible-playbook', '-i', inventory, os.path.basename(playbook)] + if extra_vars: + tmp_cmd = ['--extra-vars', '\"{}\"'.format(extra_vars)] + cmd.extend(tmp_cmd) + if tags: + tmp_cmd = ['--tags={}'.format(tags)] + cmd.extend(tmp_cmd) + if skip_tags: + tmp_cmd = ['--skip_tags={}'.format(skip_tags)] + cmd.extend(tmp_cmd) + if opt_flags: + # Should probably do proper validation on these, but I don't think it should be used very often. + cmd.extend(opt_flags) + + # Run and remove the inventory file + log_check_call(cmd, cwd=playbook_dir) + os.remove(inventory) diff --git a/bootstrapvz/plugins/apt_proxy/README.rst b/bootstrapvz/plugins/apt_proxy/README.rst new file mode 100644 index 0000000..75c2c16 --- /dev/null +++ b/bootstrapvz/plugins/apt_proxy/README.rst @@ -0,0 +1,27 @@ +APT Proxy +--------- + +This plugin creates a proxy configuration file for APT, so you could +enjoy the benefits of using cached packages instead of downloading them +from the mirror every time. You could just install ``apt-cacher-ng`` on +the host machine and then add ``"address": "127.0.0.1"`` and +``"port": 3142`` to the manifest file. + +Settings +~~~~~~~~ + +- ``address``: The IP or host of the proxy server. + ``required`` +- ``port``: The port (integer) of the proxy server. + ``required`` +- ``username``: The username for authentication against the proxy server. + This is ignored if ``password`` is not also set. + ``optional`` +- ``password``: The password for authentication against the proxy server. + This is ignored if ``username`` is not also set. + ``optional`` +- ``persistent``: Whether the proxy configuration file should remain on + the machine or not. + Valid values: ``true``, ``false`` + Default: ``false``. 
+ ``optional`` diff --git a/bootstrapvz/plugins/apt_proxy/__init__.py b/bootstrapvz/plugins/apt_proxy/__init__.py index a5b086c..132c679 100644 --- a/bootstrapvz/plugins/apt_proxy/__init__.py +++ b/bootstrapvz/plugins/apt_proxy/__init__.py @@ -1,11 +1,12 @@ def validate_manifest(data, validator, error): import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) def resolve_tasks(taskset, manifest): import tasks + taskset.add(tasks.CheckAptProxy) taskset.add(tasks.SetAptProxy) if not manifest.plugins['apt_proxy'].get('persistent', False): taskset.add(tasks.RemoveAptProxy) diff --git a/bootstrapvz/plugins/apt_proxy/manifest-schema.json b/bootstrapvz/plugins/apt_proxy/manifest-schema.json deleted file mode 100644 index 8c8e932..0000000 --- a/bootstrapvz/plugins/apt_proxy/manifest-schema.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "APT proxy plugin manifest", - "type": "object", - "properties": { - "plugins": { - "type": "object", - "properties": { - "apt_proxy": { - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "persistent": { - "type": "boolean" - }, - "port": { - "type": "integer" - } - }, - "required": ["address", "port"] - } - } - } - } -} diff --git a/bootstrapvz/plugins/apt_proxy/manifest-schema.yml b/bootstrapvz/plugins/apt_proxy/manifest-schema.yml new file mode 100644 index 0000000..3f35a09 --- /dev/null +++ b/bootstrapvz/plugins/apt_proxy/manifest-schema.yml @@ -0,0 +1,18 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: APT proxy plugin manifest +type: object +properties: + plugins: + type: object + properties: + apt_proxy: + type: object + properties: + address: {type: string} + password: {type: string} + port: {type: integer} + persistent: {type: boolean} + username: {type: string} + required: [address, port] + additionalProperties: false diff --git a/bootstrapvz/plugins/apt_proxy/tasks.py b/bootstrapvz/plugins/apt_proxy/tasks.py index 9d8dbc3..f6823e5 100644 --- a/bootstrapvz/plugins/apt_proxy/tasks.py +++ b/bootstrapvz/plugins/apt_proxy/tasks.py @@ -2,6 +2,28 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases from bootstrapvz.common.tasks import apt import os +import urllib2 + + +class CheckAptProxy(Task): + description = 'Checking reachability of APT proxy server' + phase = phases.preparation + + @classmethod + def run(cls, info): + proxy_address = info.manifest.plugins['apt_proxy']['address'] + proxy_port = info.manifest.plugins['apt_proxy']['port'] + proxy_url = 'http://{address}:{port}'.format(address=proxy_address, port=proxy_port) + try: + urllib2.urlopen(proxy_url, timeout=5) + except Exception as e: + # Default response from `apt-cacher-ng` + if isinstance(e, urllib2.HTTPError) and e.code == 404 and e.msg == 'Usage Information': + pass + else: + import logging + log = logging.getLogger(__name__) + log.warning('The APT proxy server couldn\'t be reached. 
`apt-get\' commands may fail.') class SetAptProxy(Task): @@ -12,11 +34,21 @@ class SetAptProxy(Task): @classmethod def run(cls, info): proxy_path = os.path.join(info.root, 'etc/apt/apt.conf.d/02proxy') + proxy_username = info.manifest.plugins['apt_proxy'].get('username') + proxy_password = info.manifest.plugins['apt_proxy'].get('password') proxy_address = info.manifest.plugins['apt_proxy']['address'] proxy_port = info.manifest.plugins['apt_proxy']['port'] + + if None not in (proxy_username, proxy_password): + proxy_auth = '{username}:{password}@'.format( + username=proxy_username, password=proxy_password) + else: + proxy_auth = '' + with open(proxy_path, 'w') as proxy_file: - proxy_file.write('Acquire::http {{ Proxy "http://{address}:{port}"; }};\n' - .format(address=proxy_address, port=proxy_port)) + proxy_file.write( + 'Acquire::http {{ Proxy "http://{auth}{address}:{port}"; }};\n' + .format(auth=proxy_auth, address=proxy_address, port=proxy_port)) class RemoveAptProxy(Task): diff --git a/bootstrapvz/plugins/chef/__init__.py b/bootstrapvz/plugins/chef/__init__.py index 7ba2396..5716b20 100644 --- a/bootstrapvz/plugins/chef/__init__.py +++ b/bootstrapvz/plugins/chef/__init__.py @@ -3,7 +3,7 @@ import tasks def validate_manifest(data, validator, error): import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) diff --git a/bootstrapvz/plugins/chef/manifest-schema.json b/bootstrapvz/plugins/chef/manifest-schema.json deleted file mode 100644 index 9bc9d47..0000000 --- a/bootstrapvz/plugins/chef/manifest-schema.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "Puppet plugin manifest", - "type": "object", - "properties": { - "plugins": { - "type": "object", - "properties": { - "chef": { - "type": "object", - "properties": { - "assets": { "$ref": "#/definitions/absolute_path" } - }, - "minProperties": 1, - "additionalProperties": false - } - } - } - }, - "definitions": { - "absolute_path": { - "type": "string", - "pattern": "^/[^\\0]+$" - } - } -} diff --git a/bootstrapvz/plugins/chef/manifest-schema.yml b/bootstrapvz/plugins/chef/manifest-schema.yml new file mode 100644 index 0000000..da44dd8 --- /dev/null +++ b/bootstrapvz/plugins/chef/manifest-schema.yml @@ -0,0 +1,19 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: Chef plugin manifest +type: object +properties: + plugins: + type: object + properties: + chef: + type: object + properties: + assets: + $ref: '#/definitions/absolute_path' + required: [assets] + additionalProperties: false +definitions: + absolute_path: + pattern: ^/[^\0]+$ + type: string diff --git a/bootstrapvz/plugins/chef/tasks.py b/bootstrapvz/plugins/chef/tasks.py index 2f103bf..2c02adf 100644 --- a/bootstrapvz/plugins/chef/tasks.py +++ b/bootstrapvz/plugins/chef/tasks.py @@ -1,6 +1,5 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases -from bootstrapvz.common.tasks import apt import os @@ -23,7 +22,6 @@ class CheckAssetsPath(Task): class AddPackages(Task): description = 'Add chef package' phase = phases.preparation - predecessors = [apt.AddDefaultSources] @classmethod def run(cls, info): diff --git a/bootstrapvz/plugins/cloud_init/README.rst b/bootstrapvz/plugins/cloud_init/README.rst new file mode 100644 index 0000000..b5dc6c8 --- /dev/null +++ b/bootstrapvz/plugins/cloud_init/README.rst @@ -0,0 
+1,23 @@ +cloud-init +---------- + +This plugin installs and configures +`cloud-init `__ +on the system. Depending on the release it installs it from either +backports or the main repository. + +cloud-init is only compatible with Debian wheezy and upwards. + +Settings +~~~~~~~~ + +- ``username``: The username of the account to create. + ``required`` +- ``disable_modules``: A list of strings specifying which cloud-init + modules should be disabled. + ``optional`` +- ``metadata_sources``: A string that sets the + `datasources `__ + that cloud-init should try fetching metadata from. The source is + automatically set when using the ec2 provider. + ``optional`` diff --git a/bootstrapvz/plugins/cloud_init/__init__.py b/bootstrapvz/plugins/cloud_init/__init__.py index c08eebe..7a59d40 100644 --- a/bootstrapvz/plugins/cloud_init/__init__.py +++ b/bootstrapvz/plugins/cloud_init/__init__.py @@ -2,18 +2,20 @@ def validate_manifest(data, validator, error): import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) def resolve_tasks(taskset, manifest): import tasks import bootstrapvz.providers.ec2.tasks.initd as initd_ec2 + from bootstrapvz.common.tasks import apt from bootstrapvz.common.tasks import initd from bootstrapvz.common.tasks import ssh - if manifest.system['release'] in ['wheezy', 'stable']: - taskset.add(tasks.AddBackports) + from bootstrapvz.common.releases import wheezy + if manifest.release == wheezy: + taskset.add(apt.AddBackports) taskset.update([tasks.SetMetadataSource, tasks.AddCloudInitPackages, diff --git a/bootstrapvz/plugins/cloud_init/manifest-schema.json b/bootstrapvz/plugins/cloud_init/manifest-schema.json deleted file mode 100644 index 08cd153..0000000 --- a/bootstrapvz/plugins/cloud_init/manifest-schema.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "cloud-init plugin manifest", - "type": "object", - "properties": { - "system": { - "type": "object", - "properties": { - "release": { - "type": "string", - "enum": ["wheezy", "stable", - "jessie", "testing", - "sid", "unstable"] - } - } - }, - "plugins": { - "type": "object", - "properties": { - "cloud_init": { - "type": "object", - "properties": { - "username": { - "type": "string" - }, - "disable_modules": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "metadata_sources": { - "type": "string" - } - }, - "required": ["username"] - }, - "packages": {"type": "object"} - }, - "required": ["cloud_init"] - } - } -} diff --git a/bootstrapvz/plugins/cloud_init/manifest-schema.yml b/bootstrapvz/plugins/cloud_init/manifest-schema.yml new file mode 100644 index 0000000..1950c21 --- /dev/null +++ b/bootstrapvz/plugins/cloud_init/manifest-schema.yml @@ -0,0 +1,31 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: cloud-init plugin manifest +type: object +properties: + system: + type: object + properties: + release: + type: string + enum: + - wheezy + - stable + - jessie + - testing + - sid + - unstable + plugins: + type: object + properties: + cloud_init: + type: object + properties: + username: {type: string} + metadata_sources: {type: string} + disable_modules: + type: array + items: {type: string} + uniqueItems: true + required: [username] + additionalProperties: false diff --git a/bootstrapvz/plugins/cloud_init/tasks.py 
b/bootstrapvz/plugins/cloud_init/tasks.py index 007ad86..2947214 100644 --- a/bootstrapvz/plugins/cloud_init/tasks.py +++ b/bootstrapvz/plugins/cloud_init/tasks.py @@ -7,29 +7,16 @@ import logging import os.path -class AddBackports(Task): - description = 'Adding backports to the apt sources' - phase = phases.preparation - - @classmethod - def run(cls, info): - if info.source_lists.target_exists('{system.release}-backports'): - msg = ('{system.release}-backports target already exists').format(**info.manifest_vars) - logging.getLogger(__name__).info(msg) - else: - info.source_lists.add('backports', 'deb {apt_mirror} {system.release}-backports main') - info.source_lists.add('backports', 'deb-src {apt_mirror} {system.release}-backports main') - - class AddCloudInitPackages(Task): description = 'Adding cloud-init package and sudo' phase = phases.preparation - predecessors = [apt.AddDefaultSources, AddBackports] + predecessors = [apt.AddBackports] @classmethod def run(cls, info): target = None - if info.manifest.system['release'] in ['wheezy', 'stable']: + from bootstrapvz.common.releases import wheezy + if info.manifest.release == wheezy: target = '{system.release}-backports' info.packages.add('cloud-init', target) info.packages.add('sudo') @@ -63,10 +50,10 @@ class SetMetadataSource(Task): sources = info.manifest.plugins['cloud_init']['metadata_sources'] else: source_mapping = {'ec2': 'Ec2'} - sources = source_mapping.get(info.manifest.provider, None) + sources = source_mapping.get(info.manifest.provider['name'], None) if sources is None: msg = ('No cloud-init metadata source mapping found for provider `{provider}\', ' - 'skipping selections setting.').format(provider=info.manifest.provider) + 'skipping selections setting.').format(provider=info.manifest.provider['name']) logging.getLogger(__name__).warn(msg) return sources = "cloud-init cloud-init/datasources multiselect " + sources diff --git a/bootstrapvz/plugins/commands/README.rst b/bootstrapvz/plugins/commands/README.rst new file mode 100644 index 0000000..1e31635 --- /dev/null +++ b/bootstrapvz/plugins/commands/README.rst @@ -0,0 +1,31 @@ +Commands +-------------- + +This plugin allows you to run arbitrary commands during the bootstrap process. +The commands are run at an indeterminate point *after* packages have been +installed, but *before* the volume has been unmounted. + +Settings +~~~~~~~~ + +- ``commands``: A list of lists containing strings. Each top-level item + is a single command, while the strings inside each list comprise + parts of a command. This allows for proper shell argument escaping. + To circumvent escaping, simply put the entire command in a single + string, the command will additionally be evaluated in a shell + (e.g. globbing will work). + In addition to the manifest variables ``{root}`` is also available. + It points at the root of the image volume. + ``required`` + ``manifest vars`` + +Example +~~~~~~~ + +Create an empty `index.html` in `/var/www` and delete all locales except english. +.. 
code:: yaml + + commands: + commands: + - [touch, '{root}/var/www/index.html'] + - ['rm -rf /usr/share/locale/[^en]*'] diff --git a/bootstrapvz/plugins/image_commands/__init__.py b/bootstrapvz/plugins/commands/__init__.py similarity index 89% rename from bootstrapvz/plugins/image_commands/__init__.py rename to bootstrapvz/plugins/commands/__init__.py index 1642e0d..4d1600f 100644 --- a/bootstrapvz/plugins/image_commands/__init__.py +++ b/bootstrapvz/plugins/commands/__init__.py @@ -2,7 +2,7 @@ def validate_manifest(data, validator, error): import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) diff --git a/bootstrapvz/plugins/commands/manifest-schema.yml b/bootstrapvz/plugins/commands/manifest-schema.yml new file mode 100644 index 0000000..0d7c2e5 --- /dev/null +++ b/bootstrapvz/plugins/commands/manifest-schema.yml @@ -0,0 +1,22 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: Commands plugin manifest +type: object +properties: + plugins: + type: object + properties: + commands: + type: object + properties: + commands: + items: + items: + type: string + minItems: 1 + type: array + minItems: 1 + type: array + required: [commands] + additionalProperties: false + required: [commands] diff --git a/bootstrapvz/plugins/image_commands/tasks.py b/bootstrapvz/plugins/commands/tasks.py similarity index 56% rename from bootstrapvz/plugins/image_commands/tasks.py rename to bootstrapvz/plugins/commands/tasks.py index 70ec370..2dd6eda 100644 --- a/bootstrapvz/plugins/image_commands/tasks.py +++ b/bootstrapvz/plugins/commands/tasks.py @@ -3,12 +3,13 @@ from bootstrapvz.common import phases class ImageExecuteCommand(Task): - description = 'Execute command in the image' - phase = phases.system_modification + description = 'Executing commands in the image' + phase = phases.user_modification @classmethod def run(cls, info): from bootstrapvz.common.tools import log_check_call - for raw_command in info.manifest.plugins['image_commands']['commands']: + for raw_command in info.manifest.plugins['commands']['commands']: command = map(lambda part: part.format(root=info.root, **info.manifest_vars), raw_command) - log_check_call(command) + shell = len(command) == 1 + log_check_call(command, shell=shell) diff --git a/bootstrapvz/plugins/docker_daemon/README.rst b/bootstrapvz/plugins/docker_daemon/README.rst new file mode 100644 index 0000000..c559ab1 --- /dev/null +++ b/bootstrapvz/plugins/docker_daemon/README.rst @@ -0,0 +1,18 @@ +Docker daemon +------------- + +Install `docker `__ daemon in the image. Uses +init scripts for the official repository. + +This plugin can only be used if the distribution being bootstrapped is +at least ``wheezy``, as Docker needs a kernel version ``3.8`` or higher, +which is available at the ``wheezy-backports`` repository. There's also +an architecture requirement, as it runs only on ``amd64``. + +Settings +~~~~~~~~ + +- ``version``: Selects the docker version to install. To select the + latest version simply omit this setting. 
+ Default: ``latest`` + ``optional`` diff --git a/bootstrapvz/plugins/docker_daemon/__init__.py b/bootstrapvz/plugins/docker_daemon/__init__.py new file mode 100644 index 0000000..3a72b6b --- /dev/null +++ b/bootstrapvz/plugins/docker_daemon/__init__.py @@ -0,0 +1,27 @@ +import os.path +import tasks +from bootstrapvz.common.tasks import apt +from bootstrapvz.common.releases import wheezy + + +def validate_manifest(data, validator, error): + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) + from bootstrapvz.common.releases import get_release + if get_release(data['system']['release']) == wheezy: + # prefs is a generator of apt preferences across files in the manifest + prefs = (item for vals in data.get('packages', {}).get('preferences', {}).values() for item in vals) + if not any('linux-image' in item['package'] and 'wheezy-backports' in item['pin'] for item in prefs): + msg = 'The backports kernel is required for the docker daemon to function properly' + error(msg, ['packages', 'preferences']) + + +def resolve_tasks(taskset, manifest): + if manifest.release == wheezy: + taskset.add(apt.AddBackports) + taskset.add(tasks.AddDockerDeps) + taskset.add(tasks.AddDockerBinary) + taskset.add(tasks.AddDockerInit) + taskset.add(tasks.EnableMemoryCgroup) + if len(manifest.plugins['docker_daemon'].get('pull_images', [])) > 0: + taskset.add(tasks.PullDockerImages) diff --git a/bootstrapvz/plugins/docker_daemon/assets/default/docker b/bootstrapvz/plugins/docker_daemon/assets/default/docker new file mode 100644 index 0000000..b2fc34d --- /dev/null +++ b/bootstrapvz/plugins/docker_daemon/assets/default/docker @@ -0,0 +1,19 @@ +# Docker Upstart and SysVinit configuration file + +# Customize location of Docker binary (especially for development testing). +#DOCKER="/usr/local/bin/docker" + +# Use DOCKER_OPTS to modify the daemon startup options. +#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" + +# Use DOCKER_NOFILE to set ulimit -n before starting Docker. +#DOCKER_NOFILE=65536 + +# Use DOCKER_LOCKEDMEMORY to set ulimit -l before starting Docker. +#DOCKER_LOCKEDMEMORY=unlimited + +# If you need Docker to use an HTTP proxy, it can also be specified here. +#export http_proxy="http://127.0.0.1:3128/" + +# This is also a handy place to tweak where Docker's temporary files go. +#export TMPDIR="/mnt/bigdrive/docker-tmp" diff --git a/bootstrapvz/plugins/docker_daemon/assets/init.d/docker b/bootstrapvz/plugins/docker_daemon/assets/init.d/docker new file mode 100644 index 0000000..829cab5 --- /dev/null +++ b/bootstrapvz/plugins/docker_daemon/assets/init.d/docker @@ -0,0 +1,137 @@ +#!/bin/sh + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $syslog $remote_fs +# Required-Stop: $syslog $remote_fs +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Create lightweight, portable, self-sufficient containers. +# Description: +# Docker is an open-source project to easily create lightweight, portable, +# self-sufficient containers from any application. The same container that a +# developer builds and tests on a laptop can run at scale, in production, on +# VMs, bare metal, OpenStack clusters, public clouds and more. 
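Stepping back to the validate_manifest check in docker_daemon/__init__.py above: on wheezy it walks every entry under packages/preferences and requires at least one that pins a linux-image package to wheezy-backports. A manifest fragment along these lines would pass the check; it is shown here as the parsed dict, and the specific package name, pin string and priority are illustrative:

.. code:: python

    data = {'packages': {'preferences': {
        'kernel': [{'package': 'linux-image-amd64',
                    'pin': 'release n=wheezy-backports',
                    'pin-priority': 500}]}}}

    # Same generator expression as in validate_manifest above.
    prefs = (item for vals in data.get('packages', {}).get('preferences', {}).values() for item in vals)
    assert any('linux-image' in item['package'] and 'wheezy-backports' in item['pin'] for item in prefs)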
+### END INIT INFO + +export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin + +BASE=$(basename $0) + +# modify these in /etc/default/$BASE (/etc/default/docker) +DOCKER=/usr/bin/$BASE +DOCKER_PIDFILE=/var/run/$BASE.pid +DOCKER_LOGFILE=/var/log/$BASE.log +DOCKER_OPTS= +DOCKER_DESC="Docker" + +# Get lsb functions +. /lib/lsb/init-functions + +if [ -f /etc/default/$BASE ]; then + . /etc/default/$BASE +fi + +# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it) +if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then + log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1" + exit 1 +fi + +# Check docker is present +if [ ! -x $DOCKER ]; then + log_failure_msg "$DOCKER not present or not executable" + exit 1 +fi + +fail_unless_root() { + if [ "$(id -u)" != '0' ]; then + log_failure_msg "$DOCKER_DESC must be run as root" + exit 1 + fi +} + +cgroupfs_mount() { + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + return + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +} + +case "$1" in + start) + fail_unless_root + + cgroupfs_mount + + touch "$DOCKER_LOGFILE" + chgrp docker "$DOCKER_LOGFILE" + + if [ -n "$DOCKER_NOFILE" ]; then + ulimit -n $DOCKER_NOFILE + fi + + if [ -n "$DOCKER_LOCKEDMEMORY" ]; then + ulimit -l $DOCKER_LOCKEDMEMORY + fi + + log_begin_msg "Starting $DOCKER_DESC: $BASE" + start-stop-daemon --start --background \ + --no-close \ + --exec "$DOCKER" \ + --pidfile "$DOCKER_PIDFILE" \ + -- \ + -d -p "$DOCKER_PIDFILE" \ + $DOCKER_OPTS \ + >> "$DOCKER_LOGFILE" 2>&1 + log_end_msg $? + ;; + + stop) + fail_unless_root + log_begin_msg "Stopping $DOCKER_DESC: $BASE" + start-stop-daemon --stop --pidfile "$DOCKER_PIDFILE" + log_end_msg $? 
+ ;; + + restart) + fail_unless_root + docker_pid=`cat "$DOCKER_PIDFILE" 2>/dev/null` + [ -n "$docker_pid" ] \ + && ps -p $docker_pid > /dev/null 2>&1 \ + && $0 stop + $0 start + ;; + + force-reload) + fail_unless_root + $0 restart + ;; + + status) + status_of_proc -p "$DOCKER_PIDFILE" "$DOCKER" docker + ;; + + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/bootstrapvz/plugins/docker_daemon/manifest-schema.yml b/bootstrapvz/plugins/docker_daemon/manifest-schema.yml new file mode 100644 index 0000000..62928d3 --- /dev/null +++ b/bootstrapvz/plugins/docker_daemon/manifest-schema.yml @@ -0,0 +1,29 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: Install Docker plugin manifest +type: object +properties: + system: + type: object + properties: + architecture: + type: string + enum: [amd64] + release: + not: + type: string + enum: + - squeeze + - oldstable + plugins: + type: object + properties: + docker_daemon: + type: object + properties: + version: + pattern: '^\d\.\d{1,2}\.\d$' + type: string + docker_opts: + type: string + additionalProperties: false diff --git a/bootstrapvz/plugins/docker_daemon/tasks.py b/bootstrapvz/plugins/docker_daemon/tasks.py new file mode 100644 index 0000000..59bef18 --- /dev/null +++ b/bootstrapvz/plugins/docker_daemon/tasks.py @@ -0,0 +1,122 @@ +from bootstrapvz.base import Task +from bootstrapvz.common import phases +from bootstrapvz.common.tasks import grub +from bootstrapvz.common.tasks import initd +from bootstrapvz.common.tools import log_check_call +from bootstrapvz.common.tools import sed_i +from bootstrapvz.providers.gce.tasks import boot as gceboot +import os +import os.path +import shutil +import subprocess +import time + +ASSETS_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__), 'assets')) + + +class AddDockerDeps(Task): + description = 'Add packages for docker deps' + phase = phases.package_installation + DOCKER_DEPS = ['aufs-tools', 'btrfs-tools', 'git', 'iptables', + 'procps', 'xz-utils', 'ca-certificates'] + + @classmethod + def run(cls, info): + for pkg in cls.DOCKER_DEPS: + info.packages.add(pkg) + + +class AddDockerBinary(Task): + description = 'Add docker binary' + phase = phases.system_modification + + @classmethod + def run(cls, info): + docker_version = info.manifest.plugins['docker_daemon'].get('version', False) + docker_url = 'https://get.docker.io/builds/Linux/x86_64/docker-' + if docker_version: + docker_url += docker_version + else: + docker_url += 'latest' + bin_docker = os.path.join(info.root, 'usr/bin/docker') + log_check_call(['wget', '-O', bin_docker, docker_url]) + os.chmod(bin_docker, 0755) + + +class AddDockerInit(Task): + description = 'Add docker init script' + phase = phases.system_modification + successors = [initd.InstallInitScripts] + + @classmethod + def run(cls, info): + init_src = os.path.join(ASSETS_DIR, 'init.d/docker') + info.initd['install']['docker'] = init_src + default_src = os.path.join(ASSETS_DIR, 'default/docker') + default_dest = os.path.join(info.root, 'etc/default/docker') + shutil.copy(default_src, default_dest) + docker_opts = info.manifest.plugins['docker_daemon'].get('docker_opts') + if docker_opts: + sed_i(default_dest, r'^#*DOCKER_OPTS=.*$', 'DOCKER_OPTS="%s"' % docker_opts) + + +class EnableMemoryCgroup(Task): + description = 'Change grub configuration to enable the memory cgroup' + phase = phases.system_modification + successors = [grub.InstallGrub_1_99, grub.InstallGrub_2] + predecessors = [grub.ConfigureGrub, 
gceboot.ConfigureGrub] + + @classmethod + def run(cls, info): + grub_config = os.path.join(info.root, 'etc/default/grub') + sed_i(grub_config, r'^(GRUB_CMDLINE_LINUX*=".*)"\s*$', r'\1 cgroup_enable=memory"') + + +class PullDockerImages(Task): + description = 'Pull docker images' + phase = phases.system_modification + predecessors = [AddDockerBinary] + + @classmethod + def run(cls, info): + from bootstrapvz.common.exceptions import TaskError + from subprocess import CalledProcessError + images = info.manifest.plugins['docker_daemon'].get('pull_images', []) + retries = info.manifest.plugins['docker_daemon'].get('pull_images_retries', 10) + + bin_docker = os.path.join(info.root, 'usr/bin/docker') + graph_dir = os.path.join(info.root, 'var/lib/docker') + socket = 'unix://' + os.path.join(info.workspace, 'docker.sock') + pidfile = os.path.join(info.workspace, 'docker.pid') + + try: + # start docker daemon temporarly. + daemon = subprocess.Popen([bin_docker, '-d', '--graph', graph_dir, '-H', socket, '-p', pidfile]) + # wait for docker daemon to start. + for _ in range(retries): + try: + log_check_call([bin_docker, '-H', socket, 'version']) + break + except CalledProcessError: + time.sleep(1) + for img in images: + # docker load if tarball. + if img.endswith('.tar.gz') or img.endswith('.tgz'): + cmd = [bin_docker, '-H', socket, 'load', '-i', img] + try: + log_check_call(cmd) + except CalledProcessError as e: + msg = 'error {e} loading docker image {img}.'.format(img=img, e=e) + raise TaskError(msg) + # docker pull if image name. + else: + cmd = [bin_docker, '-H', socket, 'pull', img] + try: + log_check_call(cmd) + except CalledProcessError as e: + msg = 'error {e} pulling docker image {img}.'.format(img=img, e=e) + raise TaskError(msg) + finally: + # shutdown docker daemon. 
+ daemon.terminate() + os.remove(os.path.join(info.workspace, 'docker.sock')) diff --git a/bootstrapvz/plugins/ec2_launch/__init__.py b/bootstrapvz/plugins/ec2_launch/__init__.py new file mode 100644 index 0000000..69c29c7 --- /dev/null +++ b/bootstrapvz/plugins/ec2_launch/__init__.py @@ -0,0 +1,13 @@ +def validate_manifest(data, validator, error): + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) + + +def resolve_tasks(taskset, manifest): + import tasks + taskset.add(tasks.LaunchEC2Instance) + if 'print_public_ip' in manifest.plugins['ec2_launch']: + taskset.add(tasks.PrintPublicIPAddress) + if manifest.plugins['ec2_launch'].get('deregister_ami', False): + taskset.add(tasks.DeregisterAMI) diff --git a/bootstrapvz/plugins/ec2_launch/manifest-schema.yml b/bootstrapvz/plugins/ec2_launch/manifest-schema.yml new file mode 100644 index 0000000..9d7992e --- /dev/null +++ b/bootstrapvz/plugins/ec2_launch/manifest-schema.yml @@ -0,0 +1,20 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: EC2-launch plugin manifest +type: object +properties: + plugins: + type: object + properties: + ec2_launch: + type: object + properties: + security_group_ids: + type: array + items: {type: string} + uniqueItems: true + instance_type: {type: string} + print_public_ip: {type: string} + tags: {type: object} + deregister_ami: {type: boolean} + additionalProperties: false diff --git a/bootstrapvz/plugins/ec2_launch/tasks.py b/bootstrapvz/plugins/ec2_launch/tasks.py new file mode 100644 index 0000000..5d4abc5 --- /dev/null +++ b/bootstrapvz/plugins/ec2_launch/tasks.py @@ -0,0 +1,85 @@ +from bootstrapvz.base import Task +from bootstrapvz.common import phases +from bootstrapvz.providers.ec2.tasks import ami +import logging + + +# TODO: Merge with the method available in wip-integration-tests branch +def waituntil(predicate, timeout=5, interval=0.05): + import time + threshhold = time.time() + timeout + while time.time() < threshhold: + if predicate(): + return True + time.sleep(interval) + return False + + +class LaunchEC2Instance(Task): + description = 'Launching EC2 instance' + phase = phases.image_registration + predecessors = [ami.RegisterAMI] + + @classmethod + def run(cls, info): + conn = info._ec2['connection'] + r = conn.run_instances(info._ec2['image'], + security_group_ids=info.manifest.plugins['ec2_launch'].get('security_group_ids'), + instance_type=info.manifest.plugins['ec2_launch'].get('instance_type', 't2.micro')) + info._ec2['instance'] = r.instances[0] + + if 'tags' in info.manifest.plugins['ec2_launch']: + def apply_format(v): + return v.format(**info.manifest_vars) + tags = info.manifest.plugins['ec2_launch']['tags'] + r = {k: apply_format(v) for k, v in tags.items()} + conn.create_tags([info._ec2['instance'].id], r) + + +class PrintPublicIPAddress(Task): + description = 'Waiting for the instance to launch' + phase = phases.image_registration + predecessors = [LaunchEC2Instance] + + @classmethod + def run(cls, info): + ec2 = info._ec2 + logger = logging.getLogger(__name__) + filename = info.manifest.plugins['ec2_launch']['print_public_ip'] + if not filename: + filename = '/dev/null' + f = open(filename, 'w') + + def instance_has_ip(): + ec2['instance'].update() + return ec2['instance'].ip_address + + if waituntil(instance_has_ip, timeout=120, interval=5): + logger.info('******* EC2 IP ADDRESS: %s *******' % ec2['instance'].ip_address) + f.write(ec2['instance'].ip_address) + else: + 
logger.error('Could not get IP address for the instance') + f.write('') + + f.close() + + +class DeregisterAMI(Task): + description = 'Deregistering AMI' + phase = phases.image_registration + predecessors = [LaunchEC2Instance] + + @classmethod + def run(cls, info): + ec2 = info._ec2 + logger = logging.getLogger(__name__) + + def instance_running(): + ec2['instance'].update() + return ec2['instance'].state == 'running' + + if waituntil(instance_running, timeout=120, interval=5): + info._ec2['connection'].deregister_image(info._ec2['image']) + info._ec2['snapshot'].delete() + else: + logger.error('Timeout while booting instance') diff --git a/bootstrapvz/plugins/file_copy/__init__.py b/bootstrapvz/plugins/file_copy/__init__.py new file mode 100644 index 0000000..aea8352 --- /dev/null +++ b/bootstrapvz/plugins/file_copy/__init__.py @@ -0,0 +1,16 @@ +import tasks + + +def validate_manifest(data, validator, error): + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) + + +def resolve_tasks(taskset, manifest): + taskset.add(tasks.ValidateSourcePaths) + + if ('mkdirs' in manifest.plugins['file_copy']): + taskset.add(tasks.MkdirCommand) + if ('files' in manifest.plugins['file_copy']): + taskset.add(tasks.FileCopyCommand) diff --git a/bootstrapvz/plugins/file_copy/manifest-schema.yml b/bootstrapvz/plugins/file_copy/manifest-schema.yml new file mode 100644 index 0000000..0a65dde --- /dev/null +++ b/bootstrapvz/plugins/file_copy/manifest-schema.yml @@ -0,0 +1,45 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +properties: + plugins: + properties: + file_copy: + properties: + mkdirs: + items: + dir: + $ref: '#/definitions/absolute_path' + permissions: + type: string + owner: + type: string + group: + type: string + files: + items: + src: + $ref: '#/definitions/absolute_path' + dst: + $ref: '#/definitions/absolute_path' + permissions: + type: string + owner: + type: string + group: + type: string + minItems: 1 + type: array + required: + - src + - dst + required: + - files + type: object + additionalProperties: false + required: + - file_copy + type: object +required: +- plugins +title: File copy plugin manifest +type: object diff --git a/bootstrapvz/plugins/file_copy/tasks.py b/bootstrapvz/plugins/file_copy/tasks.py new file mode 100644 index 0000000..90e1f08 --- /dev/null +++ b/bootstrapvz/plugins/file_copy/tasks.py @@ -0,0 +1,65 @@ +from bootstrapvz.base import Task +from bootstrapvz.common import phases + +import os +import shutil + + +class ValidateSourcePaths(Task): + description = 'Check whether the files to be copied exist' + phase = phases.preparation + + @classmethod + def run(cls, info): + from bootstrapvz.common.exceptions import TaskError + for file_entry in info.manifest.plugins['file_copy']['files']: + srcfile = file_entry['src'] + if not os.path.isfile(srcfile): + msg = 'The source file %s does not exist.' 
% srcfile + raise TaskError(msg) + + +def modify_path(info, path, entry): + from bootstrapvz.common.tools import log_check_call + if 'permissions' in entry: + # We wrap the permissions string in str() in case + # the user specified a numeric bitmask + chmod_command = ['chroot', info.root, 'chmod', str(entry['permissions']), path] + log_check_call(chmod_command) + + if 'owner' in entry: + chown_command = ['chroot', info.root, 'chown', entry['owner'], path] + log_check_call(chown_command) + + if 'group' in entry: + chgrp_command = ['chroot', info.root, 'chgrp', entry['group'], path] + log_check_call(chgrp_command) + + +class MkdirCommand(Task): + description = 'Creating directories requested by user' + phase = phases.user_modification + + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import log_check_call + + for dir_entry in info.manifest.plugins['file_copy']['mkdirs']: + mkdir_command = ['chroot', info.root, 'mkdir', '-p', dir_entry['dir']] + log_check_call(mkdir_command) + modify_path(info, dir_entry['dir'], dir_entry) + + +class FileCopyCommand(Task): + description = 'Copying user specified files into the image' + phase = phases.user_modification + predecessors = [MkdirCommand] + + @classmethod + def run(cls, info): + for file_entry in info.manifest.plugins['file_copy']['files']: + # note that we don't use os.path.join because it can't + # handle absolute paths, which 'dst' most likely is. + final_destination = os.path.normpath("%s/%s" % (info.root, file_entry['dst'])) + shutil.copy(file_entry['src'], final_destination) + modify_path(info, file_entry['dst'], file_entry) diff --git a/bootstrapvz/plugins/google_cloud_sdk/__init__.py b/bootstrapvz/plugins/google_cloud_sdk/__init__.py new file mode 100644 index 0000000..7a7ba47 --- /dev/null +++ b/bootstrapvz/plugins/google_cloud_sdk/__init__.py @@ -0,0 +1,6 @@ +import tasks + + +def resolve_tasks(taskset, manifest): + taskset.add(tasks.InstallCloudSDK) + taskset.add(tasks.RemoveCloudSDKTarball) diff --git a/bootstrapvz/plugins/google_cloud_sdk/tasks.py b/bootstrapvz/plugins/google_cloud_sdk/tasks.py new file mode 100644 index 0000000..33c21a2 --- /dev/null +++ b/bootstrapvz/plugins/google_cloud_sdk/tasks.py @@ -0,0 +1,71 @@ +from bootstrapvz.base import Task +from bootstrapvz.common import phases +from bootstrapvz.common.tools import log_check_call +import os + + +class InstallCloudSDK(Task): + description = 'Install Cloud SDK, not yet packaged' + phase = phases.system_modification + + @classmethod + def run(cls, info): + import contextlib + import re + import urllib + import urlparse + + # The current download URL needs to be determined dynamically via a sha1sum file. Here's the + # necessary logic. + + cloudsdk_download_site = 'https://dl.google.com/dl/cloudsdk/release/' + cloudsdk_filelist_url = urlparse.urljoin(cloudsdk_download_site, 'sha1.txt') + cloudsdk_pathname_regexp = r'^packages/google-cloud-sdk-coretools-linux-[0-9]+\.tar\.gz$' + cloudsdk_filename = '' # This is set in the 'with' block below. + + with contextlib.closing(urllib.urlopen(cloudsdk_filelist_url)) as cloudsdk_filelist: + # cloudsdk_filelist is in sha1sum format, so + # pathname is a suffix relative to cloudsdk_download_site + # + # Retrieve the pathname which matches cloudsdk_pathname_regexp. It's currently safe to + # assume that only one pathname will match. 
+ for cloudsdk_filelist_line in cloudsdk_filelist: + _, pathname = cloudsdk_filelist_line.split() + if re.match(cloudsdk_pathname_regexp, pathname): + # Don't use os.path.basename since we're actually parsing a URL + # suffix, not a path. Same probable result, but wrong semantics. + # + # The format of pathname is already known to match + # cloudsdk_pathname_regexp, so this is safe. + _, cloudsdk_filename = pathname.rsplit('/', 1) + break + + cloudsdk_download_dest = os.path.join(info.workspace, cloudsdk_filename) + + cloudsdk_url = urlparse.urljoin(cloudsdk_download_site, pathname) + + urllib.urlretrieve(cloudsdk_url, cloudsdk_download_dest) + + # Make a "mental note" of which file to remove in the system cleaning phase. + info._google_cloud_sdk['tarball_pathname'] = cloudsdk_download_dest + + cloudsdk_directory = os.path.join(info.root, 'usr/local/share/google') + os.makedirs(cloudsdk_directory) + log_check_call(['tar', 'xaf', cloudsdk_download_dest, '-C', cloudsdk_directory]) + + # We need to symlink certain programs from the Cloud SDK bin directory into /usr/local/bin. + # Keep a list and do it in a unified way. Naturally this will go away with proper packaging. + gcloud_programs = ['bq', 'gsutil', 'gcutil', 'gcloud', 'git-credential-gcloud.sh'] + for prog in gcloud_programs: + src = os.path.join('..', 'share', 'google', 'google-cloud-sdk', 'bin', prog) + dest = os.path.join(info.root, 'usr', 'local', 'bin', prog) + os.symlink(src, dest) + + +class RemoveCloudSDKTarball(Task): + description = 'Remove tarball for Cloud SDK' + phase = phases.system_cleaning + + @classmethod + def run(cls, info): + os.remove(info._google_cloud_sdk['tarball_pathname']) diff --git a/bootstrapvz/plugins/image_commands/README.md b/bootstrapvz/plugins/image_commands/README.md deleted file mode 100644 index 61583d5..0000000 --- a/bootstrapvz/plugins/image_commands/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# Image script plugin - -This plugin gives the possibility to the user to execute commands. - -Plugin is defined in the manifest file, plugin section with: - - "image_commands": { - "commands": [ [ "touch", "/var/www/index.html" ]], - } - -The *commands* element is an array of commands. Each command is an array describing the executable and its arguments. - -Command is executed in current context. It is possible to use variables to access the image or execute chroot commands in the image. 
- -Available variables are: - {root} : image mount point (to copy files for example or chroot commands) - -Example: - - [[ "touch", "{root}/var/www/hello" ], - [ "/usr/sbin/chroot", "{root}", "touch", "/var/www/hello"]] - diff --git a/bootstrapvz/plugins/image_commands/manifest-schema.json b/bootstrapvz/plugins/image_commands/manifest-schema.json deleted file mode 100644 index 9785c90..0000000 --- a/bootstrapvz/plugins/image_commands/manifest-schema.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "Image commands plugin manifest", - "type": "object", - "properties": { - "plugins": { - "type": "object", - "properties": { - "image_commands": { - "type": "object", - "properties": { - "commands": { - "type": "array", - "items": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1 - }, - "minItems": 1 - } - }, - "required": ["commands"] - } - }, - "required": ["image_commands"] - } - }, - "required": ["plugins"] -} diff --git a/bootstrapvz/plugins/minimize_size/README.rst b/bootstrapvz/plugins/minimize_size/README.rst new file mode 100644 index 0000000..bb7860c --- /dev/null +++ b/bootstrapvz/plugins/minimize_size/README.rst @@ -0,0 +1,37 @@ +minimize size +------------- + +This plugin can be used to reduce the size of the resulting image. Often +virtual volumes are much smaller than their reported size until any data +is written to them. During the bootstrapping process temporary data like +the aptitude cache is written to the volume only to be removed again. + +The minimize size plugin employs three different strategies to keep a +low volume footprint: + +- Mount folders from the host into key locations of the image volume to + avoid any unneccesary disk writes. +- Use `zerofree `__ to + deallocate unused sectors on the volume. On an unpartitioned volume + this will be done for the entire volume, while it will only happen on + the root partition for partitioned volumes. +- Use + `vmware-vdiskmanager `__ + to shrink the real volume size (only applicable when using vmdk + backing). The tool is part of the `VMWare + Workstation `__ + package. + +Settings +~~~~~~~~ + +- ``zerofree``: Specifies if it should mark unallocated blocks as + zeroes, so the volume could be better shrunk after this. + Valid values: true, false + Default: false + ``optional`` +- ``shrink``: Whether the volume should be shrunk. This setting works + best in conjunction with the zerofree tool. 
+ Valid values: true, false + Default: false + ``optional`` diff --git a/bootstrapvz/plugins/minimize_size/__init__.py b/bootstrapvz/plugins/minimize_size/__init__.py index 3f8c583..107da6e 100644 --- a/bootstrapvz/plugins/minimize_size/__init__.py +++ b/bootstrapvz/plugins/minimize_size/__init__.py @@ -3,7 +3,7 @@ import tasks def validate_manifest(data, validator, error): import os.path - schema_path = os.path.join(os.path.dirname(__file__), 'manifest-schema.json') + schema_path = os.path.join(os.path.dirname(__file__), 'manifest-schema.yml') validator(data, schema_path) if data['plugins']['minimize_size'].get('shrink', False) and data['volume']['backing'] != 'vmdk': error('Can only shrink vmdk images', ['plugins', 'minimize_size', 'shrink']) diff --git a/bootstrapvz/plugins/minimize_size/manifest-schema.json b/bootstrapvz/plugins/minimize_size/manifest-schema.json deleted file mode 100644 index 0181fa2..0000000 --- a/bootstrapvz/plugins/minimize_size/manifest-schema.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "Minimize size plugin manifest", - "type": "object", - "properties": { - "plugins": { - "type": "object", - "properties": { - "minimize_size": { - "type": "object", - "properties": { - "shrink": { "type": "boolean" }, - "zerofree": { "type": "boolean" } - } - } - } - } - } -} diff --git a/bootstrapvz/plugins/minimize_size/manifest-schema.yml b/bootstrapvz/plugins/minimize_size/manifest-schema.yml new file mode 100644 index 0000000..5a36be8 --- /dev/null +++ b/bootstrapvz/plugins/minimize_size/manifest-schema.yml @@ -0,0 +1,16 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +properties: + plugins: + properties: + minimize_size: + properties: + shrink: + type: boolean + zerofree: + type: boolean + type: object + additionalProperties: false + type: object +title: Minimize size plugin manifest +type: object diff --git a/bootstrapvz/plugins/minimize_size/tasks.py b/bootstrapvz/plugins/minimize_size/tasks.py index ef7ff97..5cba758 100644 --- a/bootstrapvz/plugins/minimize_size/tasks.py +++ b/bootstrapvz/plugins/minimize_size/tasks.py @@ -81,4 +81,6 @@ class ShrinkVolume(Task): @classmethod def run(cls, info): from bootstrapvz.common.tools import log_check_call + perm = os.stat(info.volume.image_path).st_mode & 0777 log_check_call(['/usr/bin/vmware-vdiskmanager', '-k', info.volume.image_path]) + os.chmod(info.volume.image_path, perm) diff --git a/bootstrapvz/plugins/ntp/README.rst b/bootstrapvz/plugins/ntp/README.rst new file mode 100644 index 0000000..f90c81a --- /dev/null +++ b/bootstrapvz/plugins/ntp/README.rst @@ -0,0 +1,12 @@ +NTP +--- + +This plugins installs the Network Time Protocol daemon and optionally +defines which time servers it should use. + +Settings +~~~~~~~~ + +- ``servers``: A list of strings specifying which servers should be + used to synchronize the machine clock. 
+ ``optional`` diff --git a/bootstrapvz/plugins/ntp/__init__.py b/bootstrapvz/plugins/ntp/__init__.py index 9a68d0f..858af72 100644 --- a/bootstrapvz/plugins/ntp/__init__.py +++ b/bootstrapvz/plugins/ntp/__init__.py @@ -1,6 +1,6 @@ def validate_manifest(data, validator, error): import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) diff --git a/bootstrapvz/plugins/ntp/manifest-schema.json b/bootstrapvz/plugins/ntp/manifest-schema.json deleted file mode 100644 index c386045..0000000 --- a/bootstrapvz/plugins/ntp/manifest-schema.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "NTP plugin manifest", - "type": "object", - "properties": { - "plugins": { - "type": "object", - "properties": { - "ntp": { - "type": "object", - "properties": { - "servers": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1 - } - } - } - } - } - } -} diff --git a/bootstrapvz/plugins/ntp/manifest-schema.yml b/bootstrapvz/plugins/ntp/manifest-schema.yml new file mode 100644 index 0000000..170095e --- /dev/null +++ b/bootstrapvz/plugins/ntp/manifest-schema.yml @@ -0,0 +1,16 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: NTP plugin manifest +type: object +properties: + plugins: + type: object + properties: + ntp: + type: object + properties: + servers: + type: array + items: {type: string} + minItems: 1 + additionalProperties: false diff --git a/bootstrapvz/plugins/opennebula/README.md b/bootstrapvz/plugins/opennebula/README.md deleted file mode 100644 index 49ab0c9..0000000 --- a/bootstrapvz/plugins/opennebula/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Open Nebula provider - -This provider adds OpenNebula contextualization to the virtual image (see http://opennebula.org/documentation:rel4.2:cong). - -It set ups the network and ssh keys. TO do so you should configure your virtual machine context with something like: - - ETH0_DNS $NETWORK[DNS, NETWORK_ID=2] - ETH0_GATEWAY $NETWORK[GATEWAY, NETWORK_ID=2] - ETH0_IP $NIC[IP, NETWORK_ID=2] - ETH0_MASK $NETWORK[MASK, NETWORK_ID=2] - ETH0_NETWORK $NETWORK[NETWORK, NETWORK_ID=2] - FILES path_to_my_ssh_public_key.pub - diff --git a/bootstrapvz/plugins/opennebula/README.rst b/bootstrapvz/plugins/opennebula/README.rst new file mode 100644 index 0000000..a29d7ae --- /dev/null +++ b/bootstrapvz/plugins/opennebula/README.rst @@ -0,0 +1,27 @@ +Open Nebula +----------- + +This plugin adds `OpenNebula +contextualization `__ +to the image, which sets up the network configuration and SSH keys. + +The virtual machine context should be configured as follows: + +:: + + ETH0_DNS $NETWORK[DNS, NETWORK_ID=2] + ETH0_GATEWAY $NETWORK[GATEWAY, NETWORK_ID=2] + ETH0_IP $NIC[IP, NETWORK_ID=2] + ETH0_MASK $NETWORK[MASK, NETWORK_ID=2] + ETH0_NETWORK $NETWORK[NETWORK, NETWORK_ID=2] + FILES path_to_my_ssh_public_key.pub + +The plugin will install all *.pub* files in the root authorized\_keys +file. When using the ec2 provider, the USER\_EC2\_DATA will be executed +if present. + +Settings +~~~~~~~~ + +This plugin has no settings. To enable it add ``"opennebula":{}`` to the +plugin section of the manifest. 
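As a point of reference, the key installation described above is handled by the ``opennebula-context`` package itself. Purely as an illustrative sketch of that behaviour (the context mount point and paths below are assumptions, not the package's actual implementation), appending every ``*.pub`` file to root's ``authorized_keys`` could look like this::

    import glob
    import os

    # Assumed locations for illustration only; the real paths are managed
    # by the opennebula-context package.
    CONTEXT_DIR = '/mnt/context'
    AUTH_KEYS = '/root/.ssh/authorized_keys'

    if not os.path.isdir(os.path.dirname(AUTH_KEYS)):
        os.makedirs(os.path.dirname(AUTH_KEYS), 0o700)
    with open(AUTH_KEYS, 'a') as auth_keys:
        for pubkey in sorted(glob.glob(os.path.join(CONTEXT_DIR, '*.pub'))):
            with open(pubkey) as key:
                auth_keys.write(key.read().rstrip('\n') + '\n')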
diff --git a/bootstrapvz/plugins/opennebula/__init__.py b/bootstrapvz/plugins/opennebula/__init__.py index 4ac6056..8681d18 100644 --- a/bootstrapvz/plugins/opennebula/__init__.py +++ b/bootstrapvz/plugins/opennebula/__init__.py @@ -1,7 +1,9 @@ -import tasks def resolve_tasks(taskset, manifest): - if manifest.system['release'] in ['wheezy', 'stable']: - taskset.add(tasks.AddBackports) + import tasks + from bootstrapvz.common.tasks import apt + from bootstrapvz.common.releases import wheezy + if manifest.release == wheezy: + taskset.add(apt.AddBackports) taskset.update([tasks.AddONEContextPackage]) diff --git a/bootstrapvz/plugins/opennebula/tasks.py b/bootstrapvz/plugins/opennebula/tasks.py index 410bc90..1dcac7e 100644 --- a/bootstrapvz/plugins/opennebula/tasks.py +++ b/bootstrapvz/plugins/opennebula/tasks.py @@ -3,29 +3,15 @@ from bootstrapvz.common.tasks import apt from bootstrapvz.common import phases -class AddBackports(Task): - description = 'Adding backports to the apt sources' - phase = phases.preparation - - @classmethod - def run(cls, info): - if info.source_lists.target_exists('{system.release}-backports'): - import logging - msg = ('{system.release}-backports target already exists').format(**info.manifest_vars) - logging.getLogger(__name__).info(msg) - else: - info.source_lists.add('backports', 'deb {apt_mirror} {system.release}-backports main') - info.source_lists.add('backports', 'deb-src {apt_mirror} {system.release}-backports main') - - class AddONEContextPackage(Task): description = 'Adding the OpenNebula context package' phase = phases.preparation - predecessors = [apt.AddDefaultSources, AddBackports] + predecessors = [apt.AddBackports] @classmethod def run(cls, info): target = None - if info.manifest.system['release'] in ['wheezy', 'stable']: + from bootstrapvz.common.releases import wheezy + if info.manifest.release == wheezy: target = '{system.release}-backports' info.packages.add('opennebula-context', target) diff --git a/bootstrapvz/plugins/pip_install/README.rst b/bootstrapvz/plugins/pip_install/README.rst new file mode 100644 index 0000000..3bed629 --- /dev/null +++ b/bootstrapvz/plugins/pip_install/README.rst @@ -0,0 +1,14 @@ +Pip install +----------- + +Install packages from the Python Package Index via pip. + +Installs ``build-essential`` and ``python-dev`` debian packages, so +Python extension modules can be built. + +Settings +~~~~~~~~ + +- ``packages``: Python packages to install, a list of strings. The list + can contain anything that ``pip install`` would accept as an + argument, for example ``awscli==1.3.13``. 
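To make the mapping from setting to command concrete: the ``packages`` list is handed straight to ``pip install`` inside the chroot. A minimal sketch with made-up package names and an assumed chroot path::

    # Plugin settings as bootstrap-vz would see them after parsing the manifest
    # (package names here are arbitrary examples).
    settings = {'packages': ['awscli==1.3.13', 'boto']}

    chroot_path = '/mnt/target'  # assumed image mount point, illustration only
    pip_install_command = ['chroot', chroot_path, 'pip', 'install'] + settings['packages']

    print(' '.join(pip_install_command))
    # chroot /mnt/target pip install awscli==1.3.13 boto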
diff --git a/bootstrapvz/plugins/pip_install/__init__.py b/bootstrapvz/plugins/pip_install/__init__.py new file mode 100644 index 0000000..0f6810b --- /dev/null +++ b/bootstrapvz/plugins/pip_install/__init__.py @@ -0,0 +1,12 @@ +import tasks + + +def validate_manifest(data, validator, error): + import os.path + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) + validator(data, schema_path) + + +def resolve_tasks(taskset, manifest): + taskset.add(tasks.AddPipPackage) + taskset.add(tasks.PipInstallCommand) diff --git a/bootstrapvz/plugins/pip_install/manifest-schema.yml b/bootstrapvz/plugins/pip_install/manifest-schema.yml new file mode 100644 index 0000000..36ae5d0 --- /dev/null +++ b/bootstrapvz/plugins/pip_install/manifest-schema.yml @@ -0,0 +1,18 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: Pip install plugin manifest +type: object +properties: + plugins: + type: object + properties: + pip_install: + type: object + properties: + packages: + type: array + items: + type: string + minItems: 1 + uniqueItems: true + additionalProperties: false diff --git a/bootstrapvz/plugins/pip_install/tasks.py b/bootstrapvz/plugins/pip_install/tasks.py new file mode 100644 index 0000000..c6ff89b --- /dev/null +++ b/bootstrapvz/plugins/pip_install/tasks.py @@ -0,0 +1,25 @@ +from bootstrapvz.base import Task +from bootstrapvz.common import phases + + +class AddPipPackage(Task): + description = 'Adding `pip\' and Co. to the image packages' + phase = phases.preparation + + @classmethod + def run(cls, info): + for package_name in ('python-pip', 'build-essential', 'python-dev'): + info.packages.add(package_name) + + +class PipInstallCommand(Task): + description = 'Install python packages from pypi with pip' + phase = phases.system_modification + + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import log_check_call + packages = info.manifest.plugins['pip_install']['packages'] + pip_install_command = ['chroot', info.root, 'pip', 'install'] + pip_install_command.extend(packages) + log_check_call(pip_install_command) diff --git a/bootstrapvz/plugins/prebootstrapped/README.rst b/bootstrapvz/plugins/prebootstrapped/README.rst new file mode 100644 index 0000000..bc30de8 --- /dev/null +++ b/bootstrapvz/plugins/prebootstrapped/README.rst @@ -0,0 +1,26 @@ +prebootstrapped +--------------- + +When developing for bootstrap-vz, testing can be quite tedious since the +bootstrapping process can take a while. The prebootstrapped plugin +solves that problem by creating a snapshot of your volume right after +all the software has been installed. The next time bootstrap-vz is run, +the plugin replaces all volume preparation and bootstrapping tasks and +recreates the volume from the snapshot instead. + +The plugin assumes that the users knows what he is doing (e.g. it +doesn't check whether bootstrap-vz is being run with a partitioned +volume configuration, while the snapshot is unpartitioned). + +When no snapshot or image is specified the plugin creates one and +outputs its ID/path. Specifying an ID/path enables the second mode of +operation which recreates the volume from the specified snapshot instead +of creating it from scratch. + +Settings +~~~~~~~~ + +- ``snapshot``: ID of the EBS snapshot to use. This setting only works + with EBS backed EC2 configurations. +- ``image``: Path to the loopbackvolume snapshot. This setting works + with all configurable volume backings except EBS. 
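The choice between those two modes of operation comes down to whether a ``snapshot`` (EBS) or ``image`` (all other backings) setting is present. A simplified sketch with made-up values, mirroring the task selection the plugin performs::

    # Made-up manifest fragment; only the presence of 'snapshot'/'image' matters.
    manifest = {
        'volume': {'backing': 'ebs'},
        'plugins': {'prebootstrapped': {'snapshot': 'snap-0123abcd'}},
    }

    settings = manifest['plugins']['prebootstrapped']
    if manifest['volume']['backing'] == 'ebs':
        if settings.get('snapshot') is not None:
            print('recreate the EBS volume from snapshot ' + settings['snapshot'])
        else:
            print('bootstrap from scratch and create a snapshot afterwards')
    else:
        if settings.get('image') is not None:
            print('recreate the loopback volume from ' + settings['image'])
        else:
            print('bootstrap from scratch and keep a copy of the volume image')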
diff --git a/bootstrapvz/plugins/prebootstrapped/__init__.py b/bootstrapvz/plugins/prebootstrapped/__init__.py index dbda042..614e049 100644 --- a/bootstrapvz/plugins/prebootstrapped/__init__.py +++ b/bootstrapvz/plugins/prebootstrapped/__init__.py @@ -15,7 +15,7 @@ from bootstrapvz.common.tasks import partitioning def validate_manifest(data, validator, error): import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) @@ -37,13 +37,13 @@ def resolve_tasks(taskset, manifest): guest_additions.InstallGuestAdditions, ] if manifest.volume['backing'] == 'ebs': - if 'snapshot' in settings and settings['snapshot'] is not None: + if settings.get('snapshot', None) is not None: taskset.add(CreateFromSnapshot) [taskset.discard(task) for task in skip_tasks] else: taskset.add(Snapshot) else: - if 'image' in settings and settings['image'] is not None: + if settings.get('image', None) is not None: taskset.add(CreateFromImage) [taskset.discard(task) for task in skip_tasks] else: diff --git a/bootstrapvz/plugins/prebootstrapped/manifest-schema.json b/bootstrapvz/plugins/prebootstrapped/manifest-schema.json deleted file mode 100644 index d3a19d7..0000000 --- a/bootstrapvz/plugins/prebootstrapped/manifest-schema.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "Prebootstrapped plugin manifest", - "type": "object", - "properties": { - "volume": { - "type": "object", - "properties": { - "backing": { - "type": "string", - "enum": ["raw", "ebs", "s3", "vdi", "vmdk"] - } - }, - "required": ["backing"] - }, - "plugins": { - "type": "object", - "properties": { - "prebootstrapped": { - "type": "object", - "properties": { - "snapshot": { - "type": "string" - }, - "image": { - "type": "string" - } - } - } - } - } - }, - "required": ["volume"] -} diff --git a/bootstrapvz/plugins/prebootstrapped/manifest-schema.yml b/bootstrapvz/plugins/prebootstrapped/manifest-schema.yml new file mode 100644 index 0000000..d01820c --- /dev/null +++ b/bootstrapvz/plugins/prebootstrapped/manifest-schema.yml @@ -0,0 +1,26 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: Prebootstrapped plugin manifest +type: object +properties: + volume: + type: object + properties: + backing: + type: string + enum: + - raw + - ebs + - s3 + - vdi + - vmdk + required: [backing] + plugins: + type: object + properties: + prebootstrapped: + type: object + properties: + image: {type: string} + snapshot: {type: string} + additionalProperties: false diff --git a/bootstrapvz/plugins/prebootstrapped/tasks.py b/bootstrapvz/plugins/prebootstrapped/tasks.py index d56c666..df09782 100644 --- a/bootstrapvz/plugins/prebootstrapped/tasks.py +++ b/bootstrapvz/plugins/prebootstrapped/tasks.py @@ -4,7 +4,7 @@ from bootstrapvz.common.tasks import volume from bootstrapvz.common.tasks import packages from bootstrapvz.providers.virtualbox.tasks import guest_additions from bootstrapvz.providers.ec2.tasks import ebs -from bootstrapvz.common.fs import remount +from bootstrapvz.common.fs import unmounted from shutil import copyfile import os.path import time @@ -19,9 +19,9 @@ class Snapshot(Task): @classmethod def run(cls, info): - def mk_snapshot(): - return info.volume.snapshot() - snapshot = remount(info.volume, mk_snapshot) + snapshot = None + with unmounted(info.volume): + snapshot = info.volume.snapshot() msg = 'A snapshot of 
the bootstrapped volume was created. ID: ' + snapshot.id log.info(msg) @@ -34,7 +34,7 @@ class CreateFromSnapshot(Task): @classmethod def run(cls, info): snapshot = info.manifest.plugins['prebootstrapped']['snapshot'] - ebs_volume = info._ec2['connection'].create_volume(info.volume.size.get_qty_in('GiB'), + ebs_volume = info._ec2['connection'].create_volume(info.volume.size.bytes.get_qty_in('GiB'), info._ec2['host']['availabilityZone'], snapshot=snapshot) while ebs_volume.volume_state() != 'available': @@ -55,9 +55,8 @@ class CopyImage(Task): loopback_backup_name = 'volume-{id}.{ext}.backup'.format(id=info.run_id, ext=info.volume.extension) destination = os.path.join(info.manifest.bootstrapper['workspace'], loopback_backup_name) - def mk_snapshot(): + with unmounted(info.volume): copyfile(info.volume.image_path, destination) - remount(info.volume, mk_snapshot) msg = 'A copy of the bootstrapped volume was created. Path: ' + destination log.info(msg) @@ -80,12 +79,17 @@ def set_fs_states(volume): volume.fsm.current = 'detached' p_map = volume.partition_map - partitions_state = 'attached' from bootstrapvz.base.fs.partitionmaps.none import NoPartitions - if isinstance(p_map, NoPartitions): - partitions_state = 'formatted' - else: + if not isinstance(p_map, NoPartitions): p_map.fsm.current = 'unmapped' - partitions_state = 'unmapped_fmt' + + from bootstrapvz.base.fs.partitions.unformatted import UnformattedPartition + from bootstrapvz.base.fs.partitions.single import SinglePartition for partition in p_map.partitions: - partition.fsm.current = partitions_state + if isinstance(partition, UnformattedPartition): + partition.fsm.current = 'unmapped' + continue + if isinstance(partition, SinglePartition): + partition.fsm.current = 'formatted' + continue + partition.fsm.current = 'unmapped_fmt' diff --git a/bootstrapvz/plugins/puppet/README.rst b/bootstrapvz/plugins/puppet/README.rst new file mode 100644 index 0000000..530694f --- /dev/null +++ b/bootstrapvz/plugins/puppet/README.rst @@ -0,0 +1,24 @@ +Puppet +------ + +Installs `puppet `__ and optionally applies a +manifest inside the chroot. You can also have it copy your puppet +configuration into the image so it is readily available once the image +is booted. + +Keep in mind that when applying a manifest, the system is in a chrooted +environment. This can prevent daemons from running properly (e.g. +listening to ports), they will also need to be shut down gracefully +(which bootstrap-vz cannot do) before unmounting the volume. It is +advisable to avoid starting any daemons inside the chroot at all. + +Settings +~~~~~~~~ + +- ``manifest``: Path to the puppet manifest that should be applied. + ``optional`` +- ``assets``: Path to puppet assets. The contents will be copied into + ``/etc/puppet`` on the image. Any existing files will be overwritten. + ``optional`` +- ``enable_agent``: Whether the puppet agent daemon should be enabled. 
+ ``optional`` diff --git a/bootstrapvz/plugins/puppet/__init__.py b/bootstrapvz/plugins/puppet/__init__.py index 5a4fcbf..529c1d5 100644 --- a/bootstrapvz/plugins/puppet/__init__.py +++ b/bootstrapvz/plugins/puppet/__init__.py @@ -3,7 +3,7 @@ import tasks def validate_manifest(data, validator, error): import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) diff --git a/bootstrapvz/plugins/puppet/manifest-schema.json b/bootstrapvz/plugins/puppet/manifest-schema.json deleted file mode 100644 index cd3891f..0000000 --- a/bootstrapvz/plugins/puppet/manifest-schema.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "Puppet plugin manifest", - "type": "object", - "properties": { - "plugins": { - "type": "object", - "properties": { - "puppet": { - "type": "object", - "properties": { - "manifest": { "$ref": "#/definitions/absolute_path" }, - "assets": { "$ref": "#/definitions/absolute_path" }, - "enable_agent": { "type": "boolean" } - }, - "minProperties": 1, - "additionalProperties": false - } - } - } - }, - "definitions": { - "absolute_path": { - "type": "string", - "pattern": "^/[^\\0]+$" - } - } -} diff --git a/bootstrapvz/plugins/puppet/manifest-schema.yml b/bootstrapvz/plugins/puppet/manifest-schema.yml new file mode 100644 index 0000000..3e4ff76 --- /dev/null +++ b/bootstrapvz/plugins/puppet/manifest-schema.yml @@ -0,0 +1,20 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: Puppet plugin manifest +type: object +properties: + plugins: + type: object + properties: + puppet: + type: object + properties: + assets: {$ref: '#/definitions/absolute_path'} + enable_agent: {type: boolean} + manifest: {$ref: '#/definitions/absolute_path'} + minProperties: 1 + additionalProperties: false +definitions: + absolute_path: + pattern: ^/[^\0]+$ + type: string diff --git a/bootstrapvz/plugins/puppet/tasks.py b/bootstrapvz/plugins/puppet/tasks.py index 2627e69..1efac12 100644 --- a/bootstrapvz/plugins/puppet/tasks.py +++ b/bootstrapvz/plugins/puppet/tasks.py @@ -1,7 +1,5 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases -from bootstrapvz.common.tasks import apt -from bootstrapvz.common.tasks import network from bootstrapvz.common.tools import sed_i import os @@ -41,7 +39,6 @@ class CheckManifestPath(Task): class AddPackages(Task): description = 'Add puppet package' phase = phases.preparation - predecessors = [apt.AddDefaultSources] @classmethod def run(cls, info): @@ -60,9 +57,8 @@ class CopyPuppetAssets(Task): class ApplyPuppetManifest(Task): description = 'Applying puppet manifest' - phase = phases.system_modification + phase = phases.user_modification predecessors = [CopyPuppetAssets] - successors = [network.RemoveHostname, network.RemoveDNSInfo] @classmethod def run(cls, info): diff --git a/bootstrapvz/plugins/root_password/README.rst b/bootstrapvz/plugins/root_password/README.rst new file mode 100644 index 0000000..c2f37c8 --- /dev/null +++ b/bootstrapvz/plugins/root_password/README.rst @@ -0,0 +1,11 @@ +root password +------------- + +Sets the root password. This plugin removes the task that disables the +SSH password authentication. + +Settings +~~~~~~~~ + +- ``password``: The password for the root user. 
+ ``required`` diff --git a/bootstrapvz/plugins/root_password/__init__.py b/bootstrapvz/plugins/root_password/__init__.py index 7ca6870..f97e9f8 100644 --- a/bootstrapvz/plugins/root_password/__init__.py +++ b/bootstrapvz/plugins/root_password/__init__.py @@ -2,7 +2,7 @@ def validate_manifest(data, validator, error): import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) @@ -10,4 +10,5 @@ def resolve_tasks(taskset, manifest): from bootstrapvz.common.tasks import ssh from tasks import SetRootPassword taskset.discard(ssh.DisableSSHPasswordAuthentication) + taskset.add(ssh.EnableRootLogin) taskset.add(SetRootPassword) diff --git a/bootstrapvz/plugins/root_password/manifest-schema.json b/bootstrapvz/plugins/root_password/manifest-schema.json deleted file mode 100644 index fee0b5f..0000000 --- a/bootstrapvz/plugins/root_password/manifest-schema.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "Root password plugin manifest", - "type": "object", - "properties": { - "plugins": { - "type": "object", - "properties": { - "root_password": { - "type": "object", - "properties": { - "password": { - "type": "string" - } - }, - "required": ["password"] - } - } - } - } -} diff --git a/bootstrapvz/plugins/root_password/manifest-schema.yml b/bootstrapvz/plugins/root_password/manifest-schema.yml new file mode 100644 index 0000000..f91ef63 --- /dev/null +++ b/bootstrapvz/plugins/root_password/manifest-schema.yml @@ -0,0 +1,14 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: Root password plugin manifest +type: object +properties: + plugins: + type: object + properties: + root_password: + type: object + properties: + password: {type: string} + required: [password] + additionalProperties: false diff --git a/bootstrapvz/plugins/salt/README.rst b/bootstrapvz/plugins/salt/README.rst new file mode 100644 index 0000000..cb48c4b --- /dev/null +++ b/bootstrapvz/plugins/salt/README.rst @@ -0,0 +1,26 @@ +Salt +---- + +Install `salt `__ minion in the image. Uses +`salt-bootstrap `__ script +to install. + +Settings +~~~~~~~~ + +- ``install_source``: Source to install salt codebase from. ``stable`` + for current stable, ``daily`` for installing the daily build, and + ``git`` to install from git repository. + ``required`` +- ``version``: Only needed if you are installing from ``git``. + \ ``develop`` to install current development head, or provide any tag + name or commit hash from `salt + repo `__ + ``optional`` +- ``master``: Salt master FQDN or IP + ``optional`` +- ``grains``: Set `salt + grains `__ + for this minion. Accepts a map with grain name as key and the grain + data as value. 
+ ``optional`` diff --git a/bootstrapvz/plugins/salt/__init__.py b/bootstrapvz/plugins/salt/__init__.py index 0b9cf11..f165f49 100644 --- a/bootstrapvz/plugins/salt/__init__.py +++ b/bootstrapvz/plugins/salt/__init__.py @@ -3,7 +3,7 @@ import tasks def validate_manifest(data, validator, error): import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) diff --git a/bootstrapvz/plugins/salt/manifest-schema.json b/bootstrapvz/plugins/salt/manifest-schema.json deleted file mode 100644 index 82a7952..0000000 --- a/bootstrapvz/plugins/salt/manifest-schema.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "Saltstack plugin manifest", - "type": "object", - "properties": { - "plugins": { - "type": "object", - "properties": { - "salt": { - "type": "object", - "properties": { - "master": { - "type": "string" - }, - "install_source": { - "type": "string" - }, - "version": { - "type": "string" - }, - "grains": { - "type": "object", - "patternProperties": { - "^[^\/\\0]+$": { - "type": "string" - } - }, - "minItems": 1 - } - }, - "required": ["install_source"] - } - }, - "required": ["salt"] - } - }, - "required": ["plugins"] -} diff --git a/bootstrapvz/plugins/salt/manifest-schema.yml b/bootstrapvz/plugins/salt/manifest-schema.yml new file mode 100644 index 0000000..6b99428 --- /dev/null +++ b/bootstrapvz/plugins/salt/manifest-schema.yml @@ -0,0 +1,25 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: Saltstack plugin manifest +type: object +properties: + plugins: + type: object + properties: + salt: + type: object + properties: + grains: + type: object + patternProperties: + ^[^/\0]+$: {type: string} + minItems: 1 + install_source: + enum: + - stable + - daily + - git + master: {type: string} + version: {type: string} + required: [install_source] + additionalProperties: false diff --git a/bootstrapvz/plugins/salt/tasks.py b/bootstrapvz/plugins/salt/tasks.py index 5a1b294..008a678 100644 --- a/bootstrapvz/plugins/salt/tasks.py +++ b/bootstrapvz/plugins/salt/tasks.py @@ -1,7 +1,6 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases from bootstrapvz.common.tasks import packages -from bootstrapvz.common.tasks import apt from bootstrapvz.common.tools import log_check_call from bootstrapvz.common.tools import sed_i import os @@ -10,8 +9,7 @@ import urllib class InstallSaltDependencies(Task): description = 'Add depended packages for salt-minion' - phase = phases.package_installation - predecessors = [apt.AddDefaultSources] + phase = phases.preparation @classmethod def run(cls, info): @@ -34,17 +32,14 @@ class BootstrapSaltMinion(Task): # This is needed since bootstrap doesn't handle -X for debian distros properly. # We disable checking for running services at end since we do not start them. 
- sed_i( - bootstrap_script, 'install_debian_check_services', - "disabled_debian_check_services") + sed_i(bootstrap_script, 'install_debian_check_services', 'disabled_debian_check_services') - bootstrap_command = [ - 'chroot', info.root, 'bash', 'install_salt.sh', '-X'] + bootstrap_command = ['chroot', info.root, 'bash', 'install_salt.sh', '-X'] if 'master' in info.manifest.plugins['salt']: bootstrap_command.extend(['-A', info.manifest.plugins['salt']['master']]) - install_source = info.manifest.plugins['salt']['install_source'] + install_source = info.manifest.plugins['salt'].get('install_source', 'stable') bootstrap_command.append(install_source) if install_source == 'git' and ('version' in info.manifest.plugins['salt']): diff --git a/bootstrapvz/plugins/unattended_upgrades/README.rst b/bootstrapvz/plugins/unattended_upgrades/README.rst new file mode 100644 index 0000000..7caded2 --- /dev/null +++ b/bootstrapvz/plugins/unattended_upgrades/README.rst @@ -0,0 +1,18 @@ +Unattended upgrades +------------------- + +Enables the `unattended update/upgrade +feature `__ in +aptitude. Enable it to have your system automatically download and +install security updates automatically with a set interval. + +Settings +~~~~~~~~ + +- ``update_interval``: Days between running ``apt-get update``. + ``required`` +- ``download_interval``: Days between running + ``apt-get upgrade --download-only`` + ``required`` +- ``upgrade_interval``: Days between installing any security upgrades. + ``required`` diff --git a/bootstrapvz/plugins/unattended_upgrades/__init__.py b/bootstrapvz/plugins/unattended_upgrades/__init__.py index f67e60e..dbf5ebd 100644 --- a/bootstrapvz/plugins/unattended_upgrades/__init__.py +++ b/bootstrapvz/plugins/unattended_upgrades/__init__.py @@ -2,7 +2,7 @@ def validate_manifest(data, validator, error): import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) diff --git a/bootstrapvz/plugins/unattended_upgrades/manifest-schema.json b/bootstrapvz/plugins/unattended_upgrades/manifest-schema.json deleted file mode 100644 index 3e45a7b..0000000 --- a/bootstrapvz/plugins/unattended_upgrades/manifest-schema.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "Unattended upgrades plugin manifest", - "type": "object", - "properties": { - "plugins": { - "type": "object", - "properties": { - "unattended_upgrades": { - "type": "object", - "properties": { - "update_interval": { - "type": "integer" - }, - "download_interval": { - "type": "integer" - }, - "upgrade_interval": { - "type": "integer" - } - }, - "required": ["update_interval", "download_interval", "upgrade_interval"] - } - }, - "required": ["unattended_upgrades"] - } - }, - "required": ["plugins"] -} diff --git a/bootstrapvz/plugins/unattended_upgrades/manifest-schema.yml b/bootstrapvz/plugins/unattended_upgrades/manifest-schema.yml new file mode 100644 index 0000000..a0a7568 --- /dev/null +++ b/bootstrapvz/plugins/unattended_upgrades/manifest-schema.yml @@ -0,0 +1,19 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: Unattended upgrades plugin manifest +type: object +properties: + plugins: + type: object + properties: + unattended_upgrades: + type: object + properties: + download_interval: {type: integer} + update_interval: {type: integer} + upgrade_interval: {type: integer} + required: + - 
update_interval + - download_interval + - upgrade_interval + additionalProperties: false diff --git a/bootstrapvz/plugins/unattended_upgrades/tasks.py b/bootstrapvz/plugins/unattended_upgrades/tasks.py index 1299588..dd24fb5 100644 --- a/bootstrapvz/plugins/unattended_upgrades/tasks.py +++ b/bootstrapvz/plugins/unattended_upgrades/tasks.py @@ -1,12 +1,10 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases -from bootstrapvz.common.tasks import apt class AddUnattendedUpgradesPackage(Task): description = 'Adding `unattended-upgrades\' to the image packages' phase = phases.preparation - predecessors = [apt.AddDefaultSources] @classmethod def run(cls, info): diff --git a/bootstrapvz/plugins/vagrant/README.rst b/bootstrapvz/plugins/vagrant/README.rst new file mode 100644 index 0000000..a40be4a --- /dev/null +++ b/bootstrapvz/plugins/vagrant/README.rst @@ -0,0 +1,12 @@ +Vagrant +------- + +Vagrant is a tool to quickly create virtualized environments. It uses +"boxes" to make downloading and sharing those environments easier. A box +is a tarball containing a virtual volumes accompanied by an `OVF +specification `__ +of the virtual machine. + +This plugin creates a vagrant box that is ready to be shared or +deployed. At the moment it is only compatible with the VirtualBox +provider and doesn't requires any additional settings. diff --git a/bootstrapvz/plugins/vagrant/__init__.py b/bootstrapvz/plugins/vagrant/__init__.py index af0c6c5..b820a8c 100644 --- a/bootstrapvz/plugins/vagrant/__init__.py +++ b/bootstrapvz/plugins/vagrant/__init__.py @@ -3,7 +3,7 @@ import tasks def validate_manifest(data, validator, error): import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) diff --git a/bootstrapvz/plugins/vagrant/assets/box.ovf b/bootstrapvz/plugins/vagrant/assets/box.ovf index b990dfd..1bcb6e1 100644 --- a/bootstrapvz/plugins/vagrant/assets/box.ovf +++ b/bootstrapvz/plugins/vagrant/assets/box.ovf @@ -1,7 +1,7 @@ - + List of the virtual disks used in the package @@ -15,10 +15,10 @@ A virtual machine - + The kind of installed guest operating system - Debian_64 - Debian_64 + [OS_DESCRIPTION] + [OS_TYPE] Virtual hardware requirements for a virtual machine diff --git a/bootstrapvz/plugins/vagrant/manifest-schema.json b/bootstrapvz/plugins/vagrant/manifest-schema.json deleted file mode 100644 index bb35ab5..0000000 --- a/bootstrapvz/plugins/vagrant/manifest-schema.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "Vagrant plugin manifest", - "type": "object", - "properties": { - "provider": { - "type": "string", - "enum": ["virtualbox"] - }, - "system": { - "required": ["hostname"] - }, - "volume": { - "type": "object", - "properties": { - "backing": { - "type": "string", - "enum": ["vmdk"] - // VirtualBox only supports vmdk or raw when importing via OVF: - // https://www.virtualbox.org/browser/vbox/trunk/src/VBox/Main/src-server/ApplianceImplImport.cpp?rev=51092#L636 - } - }, - "required": ["backing"] - }, - "plugins": { - "type": "object", - "properties": { - "vagrant": { - "type": "object" - } - } - } - } -} diff --git a/bootstrapvz/plugins/vagrant/manifest-schema.yml b/bootstrapvz/plugins/vagrant/manifest-schema.yml new file mode 100644 index 0000000..08c6544 --- /dev/null +++ b/bootstrapvz/plugins/vagrant/manifest-schema.yml @@ -0,0 +1,26 @@ 
+--- +$schema: http://json-schema.org/draft-04/schema# +title: Vagrant plugin manifest +type: object +properties: + provider: + type: object + properties: + name: + type: string + enum: [virtualbox] + system: + required: [hostname] + volume: + type: object + properties: + backing: + type: string + enum: [vmdk] + required: [backing] + plugins: + type: object + properties: + vagrant: + type: object + additionalProperties: false diff --git a/bootstrapvz/plugins/vagrant/tasks.py b/bootstrapvz/plugins/vagrant/tasks.py index c98f0f2..776f1c3 100644 --- a/bootstrapvz/plugins/vagrant/tasks.py +++ b/bootstrapvz/plugins/vagrant/tasks.py @@ -1,7 +1,6 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases from bootstrapvz.common.tasks import workspace -from bootstrapvz.common.tasks import apt import os import shutil @@ -39,7 +38,6 @@ class CreateVagrantBoxDir(Task): class AddPackages(Task): description = 'Add packages that vagrant depends on' phase = phases.preparation - predecessors = [apt.AddDefaultSources] @classmethod def run(cls, info): @@ -144,8 +142,7 @@ class PackageBox(Task): box_files = os.listdir(info._vagrant['folder']) log_check_call(['tar', '--create', '--gzip', '--dereference', '--file', info._vagrant['box_path'], - '--directory', info._vagrant['folder']] - + box_files + '--directory', info._vagrant['folder']] + box_files ) import logging logging.getLogger(__name__).info('The vagrant box has been placed at ' + info._vagrant['box_path']) @@ -185,13 +182,24 @@ class PackageBox(Task): # VHDURI = "http://go.microsoft.com/fwlink/?LinkId=137171" volume_uuid = info.volume.get_uuid() [disk] = root.findall('./ovf:DiskSection/ovf:Disk', namespaces) - attr(disk, 'ovf:capacity', info.volume.size.get_qty_in('B')) + attr(disk, 'ovf:capacity', info.volume.size.bytes.get_qty_in('B')) attr(disk, 'ovf:format', info.volume.ovf_uri) - attr(disk, 'ovf:uuid', volume_uuid) + attr(disk, 'vbox:uuid', volume_uuid) [system] = root.findall('./ovf:VirtualSystem', namespaces) attr(system, 'ovf:id', info._vagrant['box_name']) + # Set the operating system + [os_section] = system.findall('./ovf:OperatingSystemSection', namespaces) + os_info = {'i386': {'id': 96, 'name': 'Debian'}, + 'amd64': {'id': 96, 'name': 'Debian_64'} + }.get(info.manifest.system['architecture']) + attr(os_section, 'ovf:id', os_info['id']) + [os_desc] = os_section.findall('./ovf:Description', namespaces) + os_desc.text = os_info['name'] + [os_type] = os_section.findall('./vbox:OSType', namespaces) + os_type.text = os_info['name'] + [sysid] = system.findall('./ovf:VirtualHardwareSection/ovf:System/' 'vssd:VirtualSystemIdentifier', namespaces) sysid.text = info._vagrant['box_name'] @@ -208,7 +216,7 @@ class PackageBox(Task): [device_img] = machine.findall('./ovf:StorageControllers' '/ovf:StorageController[@name="SATA Controller"]' '/ovf:AttachedDevice/ovf:Image', namespaces) - attr(device_img, 'ovf:uuid', '{' + str(volume_uuid) + '}') + attr(device_img, 'uuid', '{' + str(volume_uuid) + '}') template.write(destination, xml_declaration=True) # , default_namespace=namespaces['ovf'] diff --git a/bootstrapvz/providers/README.rst b/bootstrapvz/providers/README.rst new file mode 100644 index 0000000..4ffd326 --- /dev/null +++ b/bootstrapvz/providers/README.rst @@ -0,0 +1,8 @@ +Providers in bootstrap-vz represent various cloud providers and virtual machines. + +bootstrap-vz is an extensible platform with loose coupling and a significant +amount of tooling, which allows for painless implementation of new providers. 
+ +The virtualbox provider for example is implemented in only 89 lines of python, +since most of the building blocks are a part of the common task library. +Only the kernel and guest additions installation are specific to that provider. diff --git a/bootstrapvz/providers/azure/README.md b/bootstrapvz/providers/azure/README.rst similarity index 67% rename from bootstrapvz/providers/azure/README.md rename to bootstrapvz/providers/azure/README.rst index c03009e..165b557 100644 --- a/bootstrapvz/providers/azure/README.md +++ b/bootstrapvz/providers/azure/README.rst @@ -1,22 +1,28 @@ -Azure provider -=========== +Azure +===== -This provider generates raw images for Microsoft Azure computing platform. +This provider generates raw images for Microsoft Azure computing +platform. Setup -===== +----- qemu-img >= 1.7.0 required to convert raw image to vhd fixed size disk. This release is available in wheezy-backports. *wget* must be installed on local computer. - -Manifest must use the *raw* format, provider will automatically transform the disk to a vhd disk format. +Manifest must use the *raw* format, provider will automatically +transform the disk to a vhd disk format. Do not create swap space on the OS disk: -The Windows Azure Linux Agent can automatically configure swap space using the local resource disk that is attached to the VM after provisioning on Azure. Modify the following parameters in /etc/waagent.conf appropriately: +The Windows Azure Linux Agent can automatically configure swap space +using the local resource disk that is attached to the VM after +provisioning on Azure. Modify the following parameters in +/etc/waagent.conf appropriately: + +:: ResourceDisk.Format=y ResourceDisk.Filesystem=ext4 @@ -24,7 +30,10 @@ The Windows Azure Linux Agent can automatically configure swap space using the l ResourceDisk.EnableSwap=y ResourceDisk.SwapSizeMB=2048 ## NOTE: set this to whatever you need it to be. -You can specify a waagent.conf file to replace the default one in the manifest in the azure/waagent section of the provider: +You can specify a waagent.conf file to replace the default one in the +manifest in the azure/waagent section of the provider: + +:: "system" : { "waagent" : { @@ -33,4 +42,5 @@ You can specify a waagent.conf file to replace the default one in the manifest i } }, ... 
-Waagent versions are available at: https://github.com/Azure/WALinuxAgent/releases +Waagent versions are available at: +https://github.com/Azure/WALinuxAgent/releases diff --git a/bootstrapvz/providers/azure/__init__.py b/bootstrapvz/providers/azure/__init__.py index c86c549..a4c2415 100644 --- a/bootstrapvz/providers/azure/__init__.py +++ b/bootstrapvz/providers/azure/__init__.py @@ -13,7 +13,7 @@ def initialize(): def validate_manifest(data, validator, error): import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) @@ -21,6 +21,7 @@ def resolve_tasks(taskset, manifest): taskset.update(task_groups.get_standard_groups(manifest)) taskset.update([tasks.packages.DefaultPackages, + loopback.AddRequiredCommands, loopback.Create, initd.InstallInitScripts, ssh.AddOpenSSHPackage, diff --git a/bootstrapvz/providers/azure/manifest-schema.json b/bootstrapvz/providers/azure/manifest-schema.json deleted file mode 100644 index 97b4330..0000000 --- a/bootstrapvz/providers/azure/manifest-schema.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "Azure manifest", - "type": "object", - "properties": { - "system": { - "type": "object", - "properties": { - "bootloader": { - "type": "string", - "enum": ["grub", "extlinux"] - }, - "waagent": { - "type": "object", - "properties": { - "conf": { - "type": "string" - }, - "version": { - "type": "string" - } - }, - "required": ["version"] - } - }, - "required": ["waagent"] - }, - "volume": { - "type": "object", - "properties": { - "backing": { - "type": "string", - "enum": ["raw"] - }, - "partitions": { - "type": "object", - "properties": { - "type": { "enum": ["none", "msdos", "gpt"] } - } - } - }, - "required": ["backing"] - } - } -} diff --git a/bootstrapvz/providers/azure/manifest-schema.yml b/bootstrapvz/providers/azure/manifest-schema.yml new file mode 100644 index 0000000..8d9a34d --- /dev/null +++ b/bootstrapvz/providers/azure/manifest-schema.yml @@ -0,0 +1,38 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: Azure manifest +type: object +properties: + provider: + type: object + properties: + waagent: + type: object + properties: + conf: {type: string} + version: {type: string} + required: [version] + required: [waagent] + system: + type: object + properties: + bootloader: + type: string + enum: + - grub + - extlinux + volume: + type: object + properties: + backing: + type: string + enum: [raw] + partitions: + type: object + properties: + type: + enum: + - none + - msdos + - gpt + required: [backing] diff --git a/bootstrapvz/providers/azure/tasks/boot.py b/bootstrapvz/providers/azure/tasks/boot.py index 5fc4756..8c61fff 100644 --- a/bootstrapvz/providers/azure/tasks/boot.py +++ b/bootstrapvz/providers/azure/tasks/boot.py @@ -1,12 +1,12 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases -from bootstrapvz.common.tasks import boot +from bootstrapvz.common.tasks import grub class ConfigureGrub(Task): description = 'Change grub configuration to allow for ttyS0 output' phase = phases.system_modification - successors = [boot.InstallGrub] + successors = [grub.InstallGrub_1_99, grub.InstallGrub_2] @classmethod def run(cls, info): diff --git a/bootstrapvz/providers/azure/tasks/packages-kernels.yml b/bootstrapvz/providers/azure/tasks/packages-kernels.yml new file mode 100644 index 
0000000..1279faf --- /dev/null +++ b/bootstrapvz/providers/azure/tasks/packages-kernels.yml @@ -0,0 +1,14 @@ +--- +# This is a mapping of Debian release codenames to processor architectures to kernel packages +squeeze: + amd64: linux-image-amd64 + i386: linux-image-686 +wheezy: + amd64: linux-image-amd64 + i386: linux-image-686 +jessie: + amd64: linux-image-amd64 + i386: linux-image-686-pae +sid: + amd64: linux-image-amd64 + i386: linux-image-686-pae diff --git a/bootstrapvz/providers/azure/tasks/packages.py b/bootstrapvz/providers/azure/tasks/packages.py index 6cb4fa0..363f5fe 100644 --- a/bootstrapvz/providers/azure/tasks/packages.py +++ b/bootstrapvz/providers/azure/tasks/packages.py @@ -1,24 +1,26 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases -from bootstrapvz.common.tasks import apt from bootstrapvz.common.tasks.packages import InstallPackages class DefaultPackages(Task): description = 'Adding image packages required for Azure' phase = phases.preparation - predecessors = [apt.AddDefaultSources] @classmethod def run(cls, info): - kernels = {'amd64': 'linux-image-amd64', - 'i386': 'linux-image-686', } - info.packages.add(kernels.get(info.manifest.system['architecture'])) info.packages.add('openssl') info.packages.add('python-openssl') info.packages.add('python-pyasn1') info.packages.add('sudo') + import os.path + kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.yml') + from bootstrapvz.common.tools import config_get + kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, + info.manifest.system['architecture']]) + info.packages.add(kernel_package) + class Waagent(Task): description = 'Add waagent' @@ -29,7 +31,7 @@ class Waagent(Task): def run(cls, info): from bootstrapvz.common.tools import log_check_call import os - waagent_version = info.manifest.system['waagent']['version'] + waagent_version = info.manifest.provider['waagent']['version'] waagent_file = 'WALinuxAgent-' + waagent_version + '.tar.gz' waagent_url = 'https://github.com/Azure/WALinuxAgent/archive/' + waagent_file log_check_call(['wget', '-P', info.root, waagent_url]) diff --git a/bootstrapvz/providers/ec2/README.rst b/bootstrapvz/providers/ec2/README.rst new file mode 100644 index 0000000..184f236 --- /dev/null +++ b/bootstrapvz/providers/ec2/README.rst @@ -0,0 +1,71 @@ +EC2 +=== + +The `EC2 `__ provider automatically creates +a volume for bootstrapping (be it EBS or S3), makes a snapshot of it +once it is done and registers it as an AMI. EBS volume backing only +works on an EC2 host while S3 backed volumes *should* work locally (at +this time however they do not, a fix is in the works). + +Unless `the cloud-init plugin <../../plugins/cloud_init>`__ +is used, special startup scripts will be installed that automatically fetch the +configured authorized\_key from the instance metadata and save or run +any userdata supplied (if the userdata begins with ``#!`` it will be +run). + +Credentials +----------- + +The AWS credentials can be configured in two ways: Via the manifest or +through environment variables. To bootstrap S3 backed instances you will +need a user certificate and a private key in addition to the access key +and secret key, which are needed for bootstraping EBS backed instances. + +The settings describes below should be placed in the ``credentials`` key +under the ``provider`` section. + +- ``access-key``: AWS access-key. 
+ May also be supplied via the environment variable + ``$AWS_ACCESS_KEY`` + ``required for EBS & S3 backing`` +- ``secret-key``: AWS secret-key. + May also be supplied via the environment variable + ``$AWS_SECRET_KEY`` + ``required for EBS & S3 backing`` +- ``certificate``: Path to the AWS user certificate. Used for + uploading the image to an S3 bucket. + May also be supplied via the environment variable + ``$AWS_CERTIFICATE`` + ``required for S3 backing`` +- ``private-key``: Path to the AWS private key. Used for uploading + the image to an S3 bucket. + May also be supplied via the environment variable + ``$AWS_PRIVATE_KEY`` + ``required for S3 backing`` +- ``user-id``: AWS user ID. Used for uploading the image to an S3 + bucket. + May also be supplied via the environment variable ``$AWS_USER_ID`` + ``required for S3 backing`` + +Example: + +.. code:: yaml + + --- + provider: + name: ec2 + virtualization: hvm + enhanced_networking: simple + credentials: + access-key: AFAKEACCESSKEYFORAWS + secret-key: thes3cr3tkeyf0ryourawsaccount/FS4d8Qdva + +Dependencies +------------ + +To communicate with the AWS API `boto `__ +is required (version 2.14.0 or higher) you can install boto with +``pip install boto`` (on wheezy, the packaged version is too low). S3 +images are chopped up and uploaded using +`euca2ools `__ (install with +``apt-get install euca2ools``). diff --git a/bootstrapvz/providers/ec2/__init__.py b/bootstrapvz/providers/ec2/__init__.py index fd5c804..23e3c45 100644 --- a/bootstrapvz/providers/ec2/__init__.py +++ b/bootstrapvz/providers/ec2/__init__.py @@ -11,8 +11,10 @@ import tasks.initd from bootstrapvz.common.tasks import volume from bootstrapvz.common.tasks import filesystem from bootstrapvz.common.tasks import boot +from bootstrapvz.common.tasks import grub from bootstrapvz.common.tasks import initd from bootstrapvz.common.tasks import loopback +from bootstrapvz.common.tasks import kernel def initialize(): @@ -23,7 +25,7 @@ def initialize(): def validate_manifest(data, validator, error): import os.path - validator(data, os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + validator(data, os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) from bootstrapvz.common.bytes import Bytes if data['volume']['backing'] == 'ebs': @@ -31,33 +33,30 @@ def validate_manifest(data, validator, error): for key, partition in data['volume']['partitions'].iteritems(): if key != 'type': volume_size += Bytes(partition['size']) - if volume_size % Bytes('1GiB') != 0: + if int(volume_size % Bytes('1GiB')) != 0: msg = ('The volume size must be a multiple of 1GiB when using EBS backing') error(msg, ['volume', 'partitions']) else: - validator(data, os.path.join(os.path.dirname(__file__), 'manifest-schema-s3.json')) + validator(data, os.path.join(os.path.dirname(__file__), 'manifest-schema-s3.yml')) bootloader = data['system']['bootloader'] - virtualization = data['virtualization'] + virtualization = data['provider']['virtualization'] backing = data['volume']['backing'] partition_type = data['volume']['partitions']['type'] + enhanced_networking = data['provider']['enhanced_networking'] if 'enhanced_networking' in data['provider'] else None if virtualization == 'pvm' and bootloader != 'pvgrub': error('Paravirtualized AMIs only support pvgrub as a bootloader', ['system', 'bootloader']) - if virtualization == 'hvm': - if backing != 'ebs': + if backing != 'ebs' and virtualization == 'hvm': error('HVM AMIs currently only work when they are EBS backed', ['volume', 'backing']) - if 
bootloader != 'extlinux': - error('HVM AMIs currently only work with extlinux as a bootloader', ['system', 'bootloader']) - if bootloader == 'extlinux' and partition_type not in ['none', 'msdos']: - error('HVM AMIs booted with extlinux currently work with unpartitioned or msdos partitions volumes', - ['volume', 'partitions', 'type']) - if backing == 's3': - if partition_type != 'none': + if backing == 's3' and partition_type != 'none': error('S3 backed AMIs currently only work with unpartitioned volumes', ['system', 'bootloader']) + if enhanced_networking == 'simple' and virtualization != 'hvm': + error('Enhanced networking only works with HVM virtualization', ['provider', 'virtualization']) + def resolve_tasks(taskset, manifest): taskset.update(task_groups.get_standard_groups(manifest)) @@ -84,7 +83,7 @@ def resolve_tasks(taskset, manifest): taskset.add(initd.AdjustExpandRootScript) if manifest.system['bootloader'] == 'pvgrub': - taskset.add(boot.AddGrubPackage) + taskset.add(grub.AddGrubPackage) taskset.add(tasks.boot.ConfigurePVGrub) if manifest.volume['backing'].lower() == 'ebs': @@ -106,6 +105,11 @@ def resolve_tasks(taskset, manifest): ]) taskset.discard(filesystem.FStab) + if manifest.provider.get('enhanced_networking', None) == 'simple': + taskset.update([kernel.AddDKMSPackages, + tasks.network.InstallEnhancedNetworking, + kernel.UpdateInitramfs]) + taskset.update([filesystem.Format, volume.Delete, ]) diff --git a/bootstrapvz/providers/ec2/ebsvolume.py b/bootstrapvz/providers/ec2/ebsvolume.py index 2cd5549..270bc75 100644 --- a/bootstrapvz/providers/ec2/ebsvolume.py +++ b/bootstrapvz/providers/ec2/ebsvolume.py @@ -11,7 +11,7 @@ class EBSVolume(Volume): def _before_create(self, e): conn = e.connection zone = e.zone - size = self.size.get_qty_in('GiB') + size = self.size.bytes.get_qty_in('GiB') self.volume = conn.create_volume(size, zone) while self.volume.volume_state() != 'available': time.sleep(5) diff --git a/bootstrapvz/providers/ec2/manifest-schema-s3.json b/bootstrapvz/providers/ec2/manifest-schema-s3.json deleted file mode 100644 index 79cd334..0000000 --- a/bootstrapvz/providers/ec2/manifest-schema-s3.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "EC2 manifest for instance store AMIs", - "type": "object", - "properties": { - "credentials": { - "type": "object", - "properties": { - "certificate": { - "type": "string" - }, - "private-key": { - "type": "string" - }, - "user-id": { - "type": "string", - "pattern": "(^arn:aws:iam::\\d*:user/\\w.*$)|(^\\d{4}-\\d{4}-\\d{4}$)" - } - } - }, - "image": { - "type": "object", - "properties": { - "bucket": { - "type": "string" - }, - "region": { - "$ref": "#/definitions/aws-region" - } - }, - "required": ["bucket", "region"] - } - }, - "required": ["image"], - "definitions": { - "aws-region": { - "enum": ["ap-northeast-1", "ap-southeast-1", - "ap-southeast-2", "eu-west-1", - "sa-east-1", "us-east-1", - "us-gov-west-1", "us-west-1", - "us-west-2", "cn-north-1"] - } - } -} diff --git a/bootstrapvz/providers/ec2/manifest-schema-s3.yml b/bootstrapvz/providers/ec2/manifest-schema-s3.yml new file mode 100644 index 0000000..6e0fc3e --- /dev/null +++ b/bootstrapvz/providers/ec2/manifest-schema-s3.yml @@ -0,0 +1,38 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: EC2 manifest for instance store AMIs +type: object +properties: + image: + type: object + properties: + bucket: {type: string} + region: {$ref: '#/definitions/aws-region'} + required: + - bucket + - region + 
provider: + type: object + properties: + credentials: + type: object + properties: + certificate: {type: string} + private-key: {type: string} + user-id: + type: string + pattern: (^arn:aws:iam::\d*:user/\w.*$)|(^\d{4}-\d{4}-\d{4}$) +required: [image] +definitions: + aws-region: + enum: + - ap-northeast-1 + - ap-southeast-1 + - ap-southeast-2 + - eu-west-1 + - sa-east-1 + - us-east-1 + - us-gov-west-1 + - us-west-1 + - us-west-2 + - cn-north-1 diff --git a/bootstrapvz/providers/ec2/manifest-schema.json b/bootstrapvz/providers/ec2/manifest-schema.json deleted file mode 100644 index bb4bd97..0000000 --- a/bootstrapvz/providers/ec2/manifest-schema.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "EC2 manifest", - "type": "object", - "properties": { - "virtualization": { "enum": ["pvm", "hvm"] }, - "image": { - "type": "object", - "properties": { - "description": { - "type": "string" - } - } - }, - "credentials": { - "type": "object", - "properties": { - "access-key": { - "type": "string" - }, - "secret-key": { - "type": "string" - } - } - }, - "system": { - "type": "object", - "properties": { - "bootloader": { - "type": "string", - "enum": ["pvgrub", "extlinux"] - } - } - }, - "volume": { - "type": "object", - "properties": { - "backing": { "enum": ["ebs", "s3"] }, - "partitions": { - "type": "object", - "properties": { - "type": { "enum": ["none", "msdos", "gpt"] } - } - } - }, - "required": ["backing"] - } - }, - "required": ["image"] -} diff --git a/bootstrapvz/providers/ec2/manifest-schema.yml b/bootstrapvz/providers/ec2/manifest-schema.yml new file mode 100644 index 0000000..696cfa7 --- /dev/null +++ b/bootstrapvz/providers/ec2/manifest-schema.yml @@ -0,0 +1,52 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: EC2 manifest +type: object +properties: + image: + type: object + properties: + description: {type: string} + provider: + type: object + properties: + credentials: + type: object + properties: + access-key: {type: string} + secret-key: {type: string} + virtualization: + enum: + - pvm + - hvm + enhanced_networking: + enum: + - none + - simple + required: [virtualization] + system: + type: object + properties: + bootloader: + type: string + enum: + - pvgrub + - grub + - extlinux + volume: + type: object + properties: + backing: + enum: + - ebs + - s3 + partitions: + type: object + properties: + type: + enum: + - none + - msdos + - gpt + required: [backing] +required: [image] diff --git a/bootstrapvz/providers/ec2/tasks/ami-akis.json b/bootstrapvz/providers/ec2/tasks/ami-akis.json deleted file mode 100644 index 79e1b66..0000000 --- a/bootstrapvz/providers/ec2/tasks/ami-akis.json +++ /dev/null @@ -1,34 +0,0 @@ -// This is a mapping of EC2 regions to processor architectures to Amazon Kernel Images -// Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html#AmazonKernelImageIDs -{ -"ap-northeast-1": // Asia Pacific (Tokyo) Region - {"i386": "aki-136bf512", // pv-grub-hd0_1.04-i386.gz - "amd64": "aki-176bf516"}, // pv-grub-hd0_1.04-x86_64.gz -"ap-southeast-1": // Asia Pacific (Singapore) Region - {"i386": "aki-ae3973fc", // pv-grub-hd0_1.04-i386.gz - "amd64": "aki-503e7402"}, // pv-grub-hd0_1.04-x86_64.gz -"ap-southeast-2": // Asia Pacific (Sydney) Region - {"i386": "aki-cd62fff7", // pv-grub-hd0_1.04-i386.gz - "amd64": "aki-c362fff9"}, // pv-grub-hd0_1.04-x86_64.gz -"eu-west-1": // EU (Ireland) Region - {"i386": "aki-68a3451f", // pv-grub-hd0_1.04-i386.gz - "amd64": "aki-52a34525"}, // 
pv-grub-hd0_1.04-x86_64.gz -"sa-east-1": // South America (Sao Paulo) Region - {"i386": "aki-5b53f446", // pv-grub-hd0_1.04-i386.gz - "amd64": "aki-5553f448"}, // pv-grub-hd0_1.04-x86_64.gz -"us-east-1": // US East (Northern Virginia) Region - {"i386": "aki-8f9dcae6", // pv-grub-hd0_1.04-i386.gz - "amd64": "aki-919dcaf8"}, // pv-grub-hd0_1.04-x86_64.gz -"us-gov-west-1": // AWS GovCloud (US) - {"i386": "aki-1fe98d3c", // pv-grub-hd0_1.04-i386.gz - "amd64": "aki-1de98d3e"}, // pv-grub-hd0_1.04-x86_64.gz -"us-west-1": // US West (Northern California) Region - {"i386": "aki-8e0531cb", // pv-grub-hd0_1.04-i386.gz - "amd64": "aki-880531cd"}, // pv-grub-hd0_1.04-x86_64.gz -"us-west-2": // US West (Oregon) Region - {"i386": "aki-f08f11c0", // pv-grub-hd0_1.04-i386.gz - "amd64": "aki-fc8f11cc"}, // pv-grub-hd0_1.04-x86_64.gz -"cn-north-1":// China North (Beijing) Region - {"i386": "aki-908f1da9", // pv-grub-hd0_1.04-i386.gz - "amd64": "aki-9e8f1da7"} // pv-grub-hd0_1.04-x86_64.gz -} diff --git a/bootstrapvz/providers/ec2/tasks/ami-akis.yml b/bootstrapvz/providers/ec2/tasks/ami-akis.yml new file mode 100644 index 0000000..f03a64f --- /dev/null +++ b/bootstrapvz/providers/ec2/tasks/ami-akis.yml @@ -0,0 +1,33 @@ +--- +# This is a mapping of EC2 regions to processor architectures to Amazon Kernel Images +# Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html#AmazonKernelImageIDs +ap-northeast-1: + amd64: aki-176bf516 + i386: aki-136bf512 +ap-southeast-1: + amd64: aki-503e7402 + i386: aki-ae3973fc +ap-southeast-2: + amd64: aki-c362fff9 + i386: aki-cd62fff7 +eu-west-1: + amd64: aki-52a34525 + i386: aki-68a3451f +sa-east-1: + amd64: aki-5553f448 + i386: aki-5b53f446 +us-east-1: + amd64: aki-919dcaf8 + i386: aki-8f9dcae6 +us-gov-west-1: + amd64: aki-1de98d3e + i386: aki-1fe98d3c +us-west-1: + amd64: aki-880531cd + i386: aki-8e0531cb +us-west-2: + amd64: aki-fc8f11cc + i386: aki-f08f11c0 +cn-north-1: + amd64: aki-9e8f1da7 + i386: aki-908f1da9 diff --git a/bootstrapvz/providers/ec2/tasks/ami.py b/bootstrapvz/providers/ec2/tasks/ami.py index cb8d459..4218294 100644 --- a/bootstrapvz/providers/ec2/tasks/ami.py +++ b/bootstrapvz/providers/ec2/tasks/ami.py @@ -21,7 +21,7 @@ class AMIName(Task): ami_name = info.manifest.image['name'].format(**info.manifest_vars) ami_description = info.manifest.image['description'].format(**info.manifest_vars) - images = info._ec2['connection'].get_all_images() + images = info._ec2['connection'].get_all_images(owners=['self']) for image in images: if ami_name == image.name: msg = 'An image by the name {ami_name} already exists.'.format(ami_name=ami_name) @@ -71,8 +71,7 @@ class UploadImage(Task): '--access-key', info.credentials['access-key'], '--secret-key', info.credentials['secret-key'], '--url', s3_url, - '--region', info._ec2['region'], - '--ec2cert', cert_ec2]) + '--region', info._ec2['region']]) class RemoveBundle(Task): @@ -103,23 +102,26 @@ class RegisterAMI(Task): registration_params['image_location'] = info._ec2['manifest_location'] else: root_dev_name = {'pvm': '/dev/sda', - 'hvm': '/dev/xvda'}.get(info.manifest.data['virtualization']) + 'hvm': '/dev/xvda'}.get(info.manifest.provider['virtualization']) registration_params['root_device_name'] = root_dev_name from boto.ec2.blockdevicemapping import BlockDeviceType from boto.ec2.blockdevicemapping import BlockDeviceMapping block_device = BlockDeviceType(snapshot_id=info._ec2['snapshot'].id, delete_on_termination=True, - size=info.volume.size.get_qty_in('GiB')) + 
size=info.volume.size.bytes.get_qty_in('GiB')) registration_params['block_device_map'] = BlockDeviceMapping() registration_params['block_device_map'][root_dev_name] = block_device - if info.manifest.data['virtualization'] == 'hvm': + if info.manifest.provider['virtualization'] == 'hvm': registration_params['virtualization_type'] = 'hvm' else: registration_params['virtualization_type'] = 'paravirtual' - akis_path = os.path.join(os.path.dirname(__file__), 'ami-akis.json') + akis_path = os.path.join(os.path.dirname(__file__), 'ami-akis.yml') from bootstrapvz.common.tools import config_get registration_params['kernel_id'] = config_get(akis_path, [info._ec2['region'], info.manifest.system['architecture']]) + if info.manifest.provider.get('enhanced_networking', None) == 'simple': + registration_params['sriov_net_support'] = 'simple' + info._ec2['image'] = info._ec2['connection'].register_image(**registration_params) diff --git a/bootstrapvz/providers/ec2/tasks/boot.py b/bootstrapvz/providers/ec2/tasks/boot.py index c325513..11b322f 100644 --- a/bootstrapvz/providers/ec2/tasks/boot.py +++ b/bootstrapvz/providers/ec2/tasks/boot.py @@ -44,7 +44,7 @@ class ConfigurePVGrub(Task): sed_i(grub_def, '^GRUB_TIMEOUT=[0-9]+', 'GRUB_TIMEOUT=0\n' 'GRUB_HIDDEN_TIMEOUT=true') sed_i(grub_def, '^#GRUB_TERMINAL=console', 'GRUB_TERMINAL=console') - sed_i(grub_def, '^GRUB_CMDLINE_LINUX_DEFAULT="quiet"', 'GRUB_CMDLINE_LINUX_DEFAULT="console=hvc0"') + sed_i(grub_def, '^GRUB_CMDLINE_LINUX_DEFAULT=.*', 'GRUB_CMDLINE_LINUX_DEFAULT="consoleblank=0 console=hvc0 elevator=noop"') from bootstrapvz.common.tools import log_check_call log_check_call(['chroot', info.root, 'update-grub']) diff --git a/bootstrapvz/providers/ec2/tasks/connection.py b/bootstrapvz/providers/ec2/tasks/connection.py index cf203e4..5ace8bc 100644 --- a/bootstrapvz/providers/ec2/tasks/connection.py +++ b/bootstrapvz/providers/ec2/tasks/connection.py @@ -18,10 +18,11 @@ class GetCredentials(Task): def get_credentials(cls, manifest, keys): from os import getenv creds = {} - if all(key in manifest.data['credentials'] for key in keys): - for key in keys: - creds[key] = manifest.data['credentials'][key] - return creds + if 'credentials' in manifest.provider: + if all(key in manifest.provider['credentials'] for key in keys): + for key in keys: + creds[key] = manifest.provider['credentials'][key] + return creds def env_key(key): return ('aws-' + key).upper().replace('-', '_') @@ -29,6 +30,15 @@ class GetCredentials(Task): for key in keys: creds[key] = getenv(env_key(key)) return creds + + def provider_key(key): + return key.replace('-', '_') + import boto.provider + provider = boto.provider.Provider('aws') + if all(getattr(provider, provider_key(key)) is not None for key in keys): + for key in keys: + creds[key] = getattr(provider, provider_key(key)) + return creds raise RuntimeError(('No ec2 credentials found, they must all be specified ' 'exclusively via environment variables or through the manifest.')) diff --git a/bootstrapvz/providers/ec2/tasks/network.py b/bootstrapvz/providers/ec2/tasks/network.py index 8d3f695..efc8a56 100644 --- a/bootstrapvz/providers/ec2/tasks/network.py +++ b/bootstrapvz/providers/ec2/tasks/network.py @@ -1,6 +1,6 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases -from bootstrapvz.common.tasks import apt +from bootstrapvz.common.tasks import kernel import os.path @@ -11,16 +11,17 @@ class EnableDHCPCDDNS(Task): @classmethod def run(cls, info): # The dhcp client that ships with debian sets the DNS servers per 
default. - # For dhcpcd we need to configure it to do that. - from bootstrapvz.common.tools import sed_i - dhcpcd = os.path.join(info.root, 'etc/default/dhcpcd') - sed_i(dhcpcd, '^#*SET_DNS=.*', 'SET_DNS=\'yes\'') + # For dhcpcd in Wheezy and earlier we need to configure it to do that. + from bootstrapvz.common.releases import wheezy + if info.manifest.release <= wheezy: + from bootstrapvz.common.tools import sed_i + dhcpcd = os.path.join(info.root, 'etc/default/dhcpcd') + sed_i(dhcpcd, '^#*SET_DNS=.*', 'SET_DNS=\'yes\'') class AddBuildEssentialPackage(Task): description = 'Adding build-essential package' phase = phases.preparation - predecessors = [apt.AddDefaultSources] @classmethod def run(cls, info): @@ -28,30 +29,39 @@ class AddBuildEssentialPackage(Task): class InstallEnhancedNetworking(Task): - description = 'Installing network drivers for SR-IOV support' - phase = phases.package_installation + description = 'Installing enhanced networking kernel driver using DKMS' + phase = phases.system_modification + successors = [kernel.UpdateInitramfs] @classmethod def run(cls, info): - drivers_url = 'http://downloads.sourceforge.net/project/e1000/ixgbevf stable/2.11.3/ixgbevf-2.11.3.tar.gz' - archive = os.path.join(info.root, 'tmp', 'ixgbevf-2.11.3.tar.gz') + version = '2.15.3' + drivers_url = 'http://downloads.sourceforge.net/project/e1000/ixgbevf stable/%s/ixgbevf-%s.tar.gz' % (version, version) + archive = os.path.join(info.root, 'tmp', 'ixgbevf-%s.tar.gz' % (version)) + module_path = os.path.join(info.root, 'usr', 'src', 'ixgbevf-%s' % (version)) import urllib urllib.urlretrieve(drivers_url, archive) from bootstrapvz.common.tools import log_check_call - log_check_call('tar', '--ungzip', - '--extract', - '--file', archive, - '--directory', os.path.join(info.root, 'tmp')) + log_check_call(['tar', '--ungzip', + '--extract', + '--file', archive, + '--directory', os.path.join(info.root, 'usr', 'src')]) - src_dir = os.path.join('/tmp', os.path.basename(drivers_url), 'src') - log_check_call(['chroot', info.root, - 'make', '--directory', src_dir]) - log_check_call(['chroot', info.root, - 'make', 'install', - '--directory', src_dir]) + with open(os.path.join(module_path, 'dkms.conf'), 'w') as dkms_conf: + dkms_conf.write("""PACKAGE_NAME="ixgbevf" +PACKAGE_VERSION="%s" +CLEAN="cd src/; make clean" +MAKE="cd src/; make BUILD_KERNEL=${kernelver}" +BUILT_MODULE_LOCATION[0]="src/" +BUILT_MODULE_NAME[0]="ixgbevf" +DEST_MODULE_LOCATION[0]="/updates" +DEST_MODULE_NAME[0]="ixgbevf" +AUTOINSTALL="yes" +""" % (version)) - ixgbevf_conf_path = os.path.join(info.root, 'etc/modprobe.d/ixgbevf.conf') - with open(ixgbevf_conf_path, 'w') as ixgbevf_conf: - ixgbevf_conf.write('options ixgbevf InterruptThrottleRate=1,1,1,1,1,1,1,1') + for task in ['add', 'build', 'install']: + # Invoke DKMS task using specified kernel module (-m) and version (-v) + log_check_call(['chroot', info.root, + 'dkms', task, '-m', 'ixgbevf', '-v', version]) diff --git a/bootstrapvz/providers/ec2/tasks/packages-kernels.json b/bootstrapvz/providers/ec2/tasks/packages-kernels.json deleted file mode 100644 index adbbf97..0000000 --- a/bootstrapvz/providers/ec2/tasks/packages-kernels.json +++ /dev/null @@ -1,15 +0,0 @@ -// This is a mapping of Debian release codenames to processor architectures to kernel packages -{ -"squeeze": // In squeeze, we need a special kernel flavor for xen - {"i386": "linux-image-xen-686", - "amd64": "linux-image-xen-amd64"}, -"wheezy": - {"i386": "linux-image-686", - "amd64": "linux-image-amd64"}, -"jessie": - {"i386": 
"linux-image-686", - "amd64": "linux-image-amd64"}, -"sid": - {"i386": "linux-image-686", - "amd64": "linux-image-amd64"} -} diff --git a/bootstrapvz/providers/ec2/tasks/packages-kernels.yml b/bootstrapvz/providers/ec2/tasks/packages-kernels.yml new file mode 100644 index 0000000..1d5a4a0 --- /dev/null +++ b/bootstrapvz/providers/ec2/tasks/packages-kernels.yml @@ -0,0 +1,14 @@ +--- +# This is a mapping of Debian release codenames to processor architectures to kernel packages +squeeze: # In squeeze, we need a special kernel flavor for xen + amd64: linux-image-xen-amd64 + i386: linux-image-xen-686 +wheezy: + amd64: linux-image-amd64 + i386: linux-image-686 +jessie: + amd64: linux-image-amd64 + i386: linux-image-686-pae +sid: + amd64: linux-image-amd64 + i386: linux-image-686-pae diff --git a/bootstrapvz/providers/ec2/tasks/packages.py b/bootstrapvz/providers/ec2/tasks/packages.py index 596d3fb..e02cd60 100644 --- a/bootstrapvz/providers/ec2/tasks/packages.py +++ b/bootstrapvz/providers/ec2/tasks/packages.py @@ -1,24 +1,27 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases -from bootstrapvz.common.tasks import apt class DefaultPackages(Task): description = 'Adding image packages required for EC2' phase = phases.preparation - predecessors = [apt.AddDefaultSources] @classmethod def run(cls, info): info.packages.add('file') # Needed for the init scripts - info.packages.add('dhcpcd') # isc-dhcp-client doesn't work properly with ec2 + # isc-dhcp-client doesn't work properly with ec2 + from bootstrapvz.common.releases import jessie + if info.manifest.release >= jessie: + info.packages.add('dhcpcd5') + else: + info.packages.add('dhcpcd') info.exclude_packages.add('isc-dhcp-client') info.exclude_packages.add('isc-dhcp-common') import os.path - kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.json') + kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.yml') from bootstrapvz.common.tools import config_get - kernel_package = config_get(kernel_packages_path, [info.release_codename, + kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, info.manifest.system['architecture']]) info.packages.add(kernel_package) diff --git a/bootstrapvz/providers/gce/README.rst b/bootstrapvz/providers/gce/README.rst new file mode 100644 index 0000000..e760120 --- /dev/null +++ b/bootstrapvz/providers/gce/README.rst @@ -0,0 +1,9 @@ +Google Compute Engine +--------------------- + +The `GCE `__ provider +can creates image as expected by GCE - i.e. raw disk image in \*.tar.gz +file. It can upload created images to Google Storage Engine (to URI +provided in manifest by ``gcs_destination``) and can register image to +be used by Google Compute Engine to project provided in manifest by +``gce_project``. Both of those functionalities are not fully tested yet. 
diff --git a/bootstrapvz/providers/gce/__init__.py b/bootstrapvz/providers/gce/__init__.py index b705c20..cda00e9 100644 --- a/bootstrapvz/providers/gce/__init__.py +++ b/bootstrapvz/providers/gce/__init__.py @@ -3,11 +3,16 @@ import tasks.apt import tasks.boot import tasks.configuration import tasks.image +import tasks.initd import tasks.host import tasks.packages +from bootstrapvz.common.tasks import apt +from bootstrapvz.common.tasks import boot from bootstrapvz.common.tasks import loopback +from bootstrapvz.common.tasks import initd +from bootstrapvz.common.tasks import kernel from bootstrapvz.common.tasks import ssh -import bootstrapvz.plugins.cloud_init.tasks +from bootstrapvz.common.tasks import volume def initialize(): @@ -16,32 +21,48 @@ def initialize(): def validate_manifest(data, validator, error): import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) def resolve_tasks(taskset, manifest): taskset.update(task_groups.get_standard_groups(manifest)) - taskset.update([bootstrapvz.plugins.cloud_init.tasks.AddBackports, + taskset.update([apt.AddBackports, + loopback.AddRequiredCommands, loopback.Create, tasks.apt.SetPackageRepositories, tasks.apt.ImportGoogleKey, tasks.packages.DefaultPackages, + tasks.packages.ReleasePackages, tasks.packages.GooglePackages, - tasks.packages.InstallGSUtil, tasks.configuration.GatherReleaseInformation, tasks.host.DisableIPv6, + tasks.host.InstallHostnameHook, tasks.boot.ConfigureGrub, + initd.AddExpandRoot, + tasks.initd.AdjustExpandRootDev, + initd.InstallInitScripts, + boot.BlackListModules, + boot.UpdateInitramfs, ssh.AddSSHKeyGeneration, + ssh.DisableSSHPasswordAuthentication, tasks.apt.CleanGoogleRepositoriesAndKeys, loopback.MoveImage, tasks.image.CreateTarball, + volume.Delete, ]) + if manifest.volume['partitions']['type'] != 'none': + taskset.add(initd.AdjustExpandRootScript) + + if manifest.volume['partitions']['type'] != 'mbr': + taskset.update([tasks.initd.AddGrowRootDisable, + kernel.UpdateInitramfs]) + if 'gcs_destination' in manifest.image: taskset.add(tasks.image.UploadImage) if 'gce_project' in manifest.image: diff --git a/bootstrapvz/providers/gce/assets/initramfs-tools/scripts/local-premount/gce-disable-growroot b/bootstrapvz/providers/gce/assets/initramfs-tools/scripts/local-premount/gce-disable-growroot new file mode 100755 index 0000000..0e7d7d9 --- /dev/null +++ b/bootstrapvz/providers/gce/assets/initramfs-tools/scripts/local-premount/gce-disable-growroot @@ -0,0 +1,52 @@ +# Selectively disable growroot -*- shell-script -*- +set -e + +message() { echo "DISABLE-GROWROOT:" "$@" ; } +error_exit() { message "$@"; exit 1; } + +. /scripts/functions + +# initramfs-tools exports the following variables, used below: +# $ROOT - Generally "/dev/disk/by-uuid/" which is a link to /dev/sda1 +# $ROOTFLAGS - Generally empty +# $ROOTFSTYPE - Generally empty +# $rootmnt - Set to "/root" + +# Follow link to get real root location +if [ ! 
-L "${ROOT}" ]; then + real_root=${ROOT} +else + real_root=$(readlink -f "${ROOT}") +fi + +# Remove partition number to get disk +disk=$(echo ${real_root} | sed 's/[0-9]*$//') + +# Determine number of 512-byte sectors in 2TB +two_tb=$((2*(1024**4))) +max_sectors=$((${two_tb}/512)) + +# Determine number of sectors on disk +geometry=$(sfdisk ${disk} --show-pt-geometry) +cyl=$(echo $geometry | cut -d " " -f 2) +heads=$(echo $geometry | cut -d " " -f 4) +secs=$(echo $geometry | cut -d " " -f 6) +sectors=$((${cyl}*${heads}*${secs})) + +# If disk is >2TB, disable growroot +if [ "$sectors" -gt "$max_sectors" ]; then + message "Disk size >2TB - Not expanding root partition" + # Temporarily mount filesystem + if [ -z "${ROOTFSTYPE}" ]; then + fstype=$(get_fstype "${ROOT}") + else + fstype=${ROOTFSTYPE} + fi + mount -w ${fstype:+-t ${fstype} }${ROOTFLAGS} ${ROOT} ${rootmnt} || + error_exit "failed to mount ${ROOT}." + # Disable growroot + touch "${rootmnt}/etc/growroot-disabled" + # Unmount filesystem + umount "${rootmnt}" || error_exit "failed to umount ${rootmnt}"; +fi + diff --git a/bootstrapvz/providers/gce/manifest-schema.json b/bootstrapvz/providers/gce/manifest-schema.json deleted file mode 100644 index 4e1d02d..0000000 --- a/bootstrapvz/providers/gce/manifest-schema.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "GCE manifest", - "type": "object", - "properties": { - "image": { - "type": "object", - "properties": { - "description": { - "type": "string" - }, - "gcs_destination": { - "type": "string" - }, - "gce_project": { - "type": "string" - } - } - }, - "system": { - "type": "object", - "properties": { - "bootloader": { - "type": "string", - "enum": ["grub", "extlinux"] - } - } - }, - "volume": { - "type": "object", - "properties": { - "partitions": { - "type": "object", - "properties": { - "type": { "enum": ["msdos"] } - } - } - }, - "required": ["partitions"] - } - } -} - diff --git a/bootstrapvz/providers/gce/manifest-schema.yml b/bootstrapvz/providers/gce/manifest-schema.yml new file mode 100644 index 0000000..aa6bb59 --- /dev/null +++ b/bootstrapvz/providers/gce/manifest-schema.yml @@ -0,0 +1,29 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: GCE manifest +type: object +properties: + image: + type: object + properties: + description: {type: string} + gce_project: {type: string} + gcs_destination: {type: string} + system: + type: object + properties: + bootloader: + type: string + enum: + - grub + - extlinux + volume: + type: object + properties: + partitions: + type: object + properties: + type: + enum: + - msdos + required: [partitions] diff --git a/bootstrapvz/providers/gce/tasks/__init__.py b/bootstrapvz/providers/gce/tasks/__init__.py index e69de29..494ec79 100644 --- a/bootstrapvz/providers/gce/tasks/__init__.py +++ b/bootstrapvz/providers/gce/tasks/__init__.py @@ -0,0 +1,3 @@ +import os.path + +assets = os.path.normpath(os.path.join(os.path.dirname(__file__), '../assets')) diff --git a/bootstrapvz/providers/gce/tasks/apt.py b/bootstrapvz/providers/gce/tasks/apt.py index 733c1c6..229656a 100644 --- a/bootstrapvz/providers/gce/tasks/apt.py +++ b/bootstrapvz/providers/gce/tasks/apt.py @@ -1,6 +1,7 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases from bootstrapvz.common.tasks import apt +from bootstrapvz.common.tasks import network from bootstrapvz.common.tools import log_check_call import os @@ -8,7 +9,7 @@ import os class SetPackageRepositories(Task): description = 'Adding apt 
sources' phase = phases.preparation - successors = [apt.AddManifestSources] + predecessors = [apt.AddManifestSources, apt.AddBackports] @classmethod def run(cls, info): @@ -39,7 +40,7 @@ class ImportGoogleKey(Task): class CleanGoogleRepositoriesAndKeys(Task): description = 'Removing Google key and apt source files' phase = phases.system_cleaning - successors = [apt.AptClean] + successors = [apt.AptClean, network.RemoveDNSInfo] @classmethod def run(cls, info): diff --git a/bootstrapvz/providers/gce/tasks/boot.py b/bootstrapvz/providers/gce/tasks/boot.py index 1100210..224c7fc 100644 --- a/bootstrapvz/providers/gce/tasks/boot.py +++ b/bootstrapvz/providers/gce/tasks/boot.py @@ -1,13 +1,13 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases -from bootstrapvz.common.tasks import boot +from bootstrapvz.common.tasks import grub import os.path class ConfigureGrub(Task): description = 'Change grub configuration to allow for ttyS0 output' phase = phases.system_modification - successors = [boot.InstallGrub] + successors = [grub.InstallGrub_1_99, grub.InstallGrub_2] @classmethod def run(cls, info): diff --git a/bootstrapvz/providers/gce/tasks/host.py b/bootstrapvz/providers/gce/tasks/host.py index bd61878..aabd017 100644 --- a/bootstrapvz/providers/gce/tasks/host.py +++ b/bootstrapvz/providers/gce/tasks/host.py @@ -17,12 +17,25 @@ class DisableIPv6(Task): print >>config_file, "net.ipv6.conf.all.disable_ipv6 = 1" -class SetHostname(Task): - description = "Setting hostname" +class InstallHostnameHook(Task): + description = "Installing hostname hook" phase = phases.system_modification @classmethod def run(cls, info): + # There's a surprising amount of software out there which doesn't react well to the system + # hostname being set to a potentially long the fully qualified domain name, including Java 7 + # and lower, quite relevant to a lot of cloud use cases such as Hadoop. Since Google Compute + # Engine's out-of-the-box domain names are long but predictable based on project name, we + # install this hook to set the hostname to the short hostname but add a suitable /etc/hosts + # entry. + # + # Since not all operating systems which Google supports on Compute Engine work with the + # /etc/dhcp/dhclient-exit-hooks.d directory, Google's internally-built packaging uses the + # consistent install path of /usr/share/google/set-hostname, and OS-specific build steps are + # used to activate the DHCP hook. In any future Debian-maintained distro-specific packaging, + # the updated deb could handle installing the below symlink or the script itself into + # /etc/dhcp/dhclient-exit-hooks.d. log_check_call(['chroot', info.root, 'ln', '-s', '/usr/share/google/set-hostname', '/etc/dhcp/dhclient-exit-hooks.d/set-hostname']) diff --git a/bootstrapvz/providers/gce/tasks/initd.py b/bootstrapvz/providers/gce/tasks/initd.py new file mode 100644 index 0000000..f2b20bc --- /dev/null +++ b/bootstrapvz/providers/gce/tasks/initd.py @@ -0,0 +1,38 @@ +from bootstrapvz.base import Task +from bootstrapvz.common import phases +from bootstrapvz.common.tasks import initd +from bootstrapvz.common.tasks import kernel +from . 
import assets +import os.path + + +class AdjustExpandRootDev(Task): + description = 'Adjusting the expand-root device' + phase = phases.system_modification + predecessors = [initd.AddExpandRoot, initd.AdjustExpandRootScript] + + @classmethod + def run(cls, info): + from bootstrapvz.common.tools import sed_i + script = os.path.join(info.root, 'etc/init.d/expand-root') + sed_i(script, '/dev/xvda', '/dev/sda') + + +class AddGrowRootDisable(Task): + description = 'Add script to selectively disable growroot' + phase = phases.system_modification + successors = [kernel.UpdateInitramfs] + + @classmethod + def run(cls, info): + import stat + rwxr_xr_x = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | + stat.S_IRGRP | stat.S_IXGRP | + stat.S_IROTH | stat.S_IXOTH) + from shutil import copy + script_src = os.path.join(assets, + 'initramfs-tools/scripts/local-premount/gce-disable-growroot') + script_dst = os.path.join(info.root, + 'etc/initramfs-tools/scripts/local-premount/gce-disable-growroot') + copy(script_src, script_dst) + os.chmod(script_dst, rwxr_xr_x) diff --git a/bootstrapvz/providers/gce/tasks/packages-kernels.yml b/bootstrapvz/providers/gce/tasks/packages-kernels.yml new file mode 100644 index 0000000..1d5a4a0 --- /dev/null +++ b/bootstrapvz/providers/gce/tasks/packages-kernels.yml @@ -0,0 +1,14 @@ +--- +# This is a mapping of Debian release codenames to processor architectures to kernel packages +squeeze: # In squeeze, we need a special kernel flavor for xen + amd64: linux-image-xen-amd64 + i386: linux-image-xen-686 +wheezy: + amd64: linux-image-amd64 + i386: linux-image-686 +jessie: + amd64: linux-image-amd64 + i386: linux-image-686-pae +sid: + amd64: linux-image-amd64 + i386: linux-image-686-pae diff --git a/bootstrapvz/providers/gce/tasks/packages.py b/bootstrapvz/providers/gce/tasks/packages.py index 1667d35..2928a6d 100644 --- a/bootstrapvz/providers/gce/tasks/packages.py +++ b/bootstrapvz/providers/gce/tasks/packages.py @@ -1,15 +1,13 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases from bootstrapvz.common.tasks import apt -from bootstrapvz.common.tools import log_check_call +import logging import os -import os.path class DefaultPackages(Task): description = 'Adding image packages required for GCE' phase = phases.preparation - predecessors = [apt.AddDefaultSources] @classmethod def run(cls, info): @@ -21,14 +19,30 @@ class DefaultPackages(Task): info.packages.add('openssh-client') info.packages.add('openssh-server') info.packages.add('dhcpd') + info.packages.add('ca-certificates') - kernel_packages_path = os.path.join(os.path.dirname(__file__), '../../ec2/tasks/packages-kernels.json') + kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.yml') from bootstrapvz.common.tools import config_get - kernel_package = config_get(kernel_packages_path, [info.release_codename, + kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, info.manifest.system['architecture']]) info.packages.add(kernel_package) +class ReleasePackages(Task): + description = 'Adding release-specific packages required for GCE' + phase = phases.preparation + predecessors = [apt.AddBackports, DefaultPackages] + + @classmethod + def run(cls, info): + # Add release-specific packages, if available. 
+ if info.source_lists.target_exists('wheezy-backports'): + info.packages.add('cloud-initramfs-growroot') + else: + msg = ('No release-specific packages found for {system.release}').format(**info.manifest_vars) + logging.getLogger(__name__).warning(msg) + + class GooglePackages(Task): description = 'Adding image packages required for GCE from Google repositories' phase = phases.preparation @@ -39,18 +53,3 @@ class GooglePackages(Task): info.packages.add('google-compute-daemon') info.packages.add('google-startup-scripts') info.packages.add('python-gcimagebundle') - info.packages.add('gcutil') - - -class InstallGSUtil(Task): - description = 'Install gsutil, not yet packaged' - phase = phases.package_installation - - @classmethod - def run(cls, info): - log_check_call(['wget', 'http://storage.googleapis.com/pub/gsutil.tar.gz']) - gsutil_directory = os.path.join(info.root, 'usr/local/share/google') - gsutil_binary = os.path.join(os.path.join(info.root, 'usr/local/bin'), 'gsutil') - os.makedirs(gsutil_directory) - log_check_call(['tar', 'xaf', 'gsutil.tar.gz', '-C', gsutil_directory]) - log_check_call(['ln', '-s', '../share/google/gsutil/gsutil', gsutil_binary]) diff --git a/bootstrapvz/providers/kvm/README.rst b/bootstrapvz/providers/kvm/README.rst new file mode 100644 index 0000000..57e60d4 --- /dev/null +++ b/bootstrapvz/providers/kvm/README.rst @@ -0,0 +1,8 @@ +KVM +--- + +The `KVM `__ provider creates +virtual images for Linux Kernel-based Virtual Machines. It supports the +installation of `virtio kernel +modules `__ (paravirtualized +drivers for IO operations). diff --git a/bootstrapvz/providers/kvm/__init__.py b/bootstrapvz/providers/kvm/__init__.py index f09b464..55879d4 100644 --- a/bootstrapvz/providers/kvm/__init__.py +++ b/bootstrapvz/providers/kvm/__init__.py @@ -11,7 +11,7 @@ def initialize(): def validate_manifest(data, validator, error): import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) @@ -19,6 +19,7 @@ def resolve_tasks(taskset, manifest): taskset.update(task_groups.get_standard_groups(manifest)) taskset.update([tasks.packages.DefaultPackages, + loopback.AddRequiredCommands, loopback.Create, initd.InstallInitScripts, ssh.AddOpenSSHPackage, @@ -27,7 +28,7 @@ def resolve_tasks(taskset, manifest): loopback.MoveImage, ]) - if manifest.bootstrapper.get('virtio', []): + if manifest.provider.get('virtio', []): from tasks import virtio taskset.update([virtio.VirtIO]) diff --git a/bootstrapvz/providers/kvm/manifest-schema.json b/bootstrapvz/providers/kvm/manifest-schema.json deleted file mode 100644 index 3586e4e..0000000 --- a/bootstrapvz/providers/kvm/manifest-schema.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "KVM manifest", - "type": "object", - "properties": { - "system": { - "type": "object", - "properties": { - "virtio": { - "type": "array", - "items": { - "type": "string", - "enum": ["virtio", - "virtio_pci", - "virtio_balloon", - "virtio_blk", - "virtio_net", - "virtio_ring"] - }, - "minItems": 1 - }, - "bootloader": { - "type": "string", - "enum": ["grub", "extlinux"] - } - } - }, - "volume": { - "type": "object", - "properties": { - "backing": { - "type": "string", - "enum": ["raw"] - }, - "partitions": { - "type": "object", - "properties": { - "type": { "enum": ["none", "msdos", "gpt"] } - } - } - }, - "required": ["backing"] - 
} - } -} diff --git a/bootstrapvz/providers/kvm/manifest-schema.yml b/bootstrapvz/providers/kvm/manifest-schema.yml new file mode 100644 index 0000000..0deb7da --- /dev/null +++ b/bootstrapvz/providers/kvm/manifest-schema.yml @@ -0,0 +1,44 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: KVM manifest +type: object +properties: + provider: + type: object + properties: + virtio: + type: array + items: + type: string + enum: + - virtio + - virtio_pci + - virtio_balloon + - virtio_blk + - virtio_net + - virtio_ring + minItems: 1 + system: + type: object + properties: + bootloader: + type: string + enum: + - grub + - extlinux + volume: + type: object + properties: + backing: + type: string + enum: [raw] + partitions: + type: object + properties: + type: + type: string + enum: + - none + - msdos + - gpt + required: [backing] diff --git a/bootstrapvz/providers/kvm/tasks/packages-kernels.yml b/bootstrapvz/providers/kvm/tasks/packages-kernels.yml new file mode 100644 index 0000000..1279faf --- /dev/null +++ b/bootstrapvz/providers/kvm/tasks/packages-kernels.yml @@ -0,0 +1,14 @@ +--- +# This is a mapping of Debian release codenames to processor architectures to kernel packages +squeeze: + amd64: linux-image-amd64 + i386: linux-image-686 +wheezy: + amd64: linux-image-amd64 + i386: linux-image-686 +jessie: + amd64: linux-image-amd64 + i386: linux-image-686-pae +sid: + amd64: linux-image-amd64 + i386: linux-image-686-pae diff --git a/bootstrapvz/providers/kvm/tasks/packages.py b/bootstrapvz/providers/kvm/tasks/packages.py index 85ad028..9fe6bbe 100644 --- a/bootstrapvz/providers/kvm/tasks/packages.py +++ b/bootstrapvz/providers/kvm/tasks/packages.py @@ -1,15 +1,16 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases -from bootstrapvz.common.tasks import apt class DefaultPackages(Task): description = 'Adding image packages required for kvm' phase = phases.preparation - predecessors = [apt.AddDefaultSources] @classmethod def run(cls, info): - kernels = {'amd64': 'linux-image-amd64', - 'i386': 'linux-image-686', } - info.packages.add(kernels.get(info.manifest.system['architecture'])) + import os.path + kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.yml') + from bootstrapvz.common.tools import config_get + kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, + info.manifest.system['architecture']]) + info.packages.add(kernel_package) diff --git a/bootstrapvz/providers/kvm/tasks/virtio.py b/bootstrapvz/providers/kvm/tasks/virtio.py index cb3ae68..402ba1a 100644 --- a/bootstrapvz/providers/kvm/tasks/virtio.py +++ b/bootstrapvz/providers/kvm/tasks/virtio.py @@ -12,5 +12,5 @@ class VirtIO(Task): modules = os.path.join(info.root, '/etc/initramfs-tools/modules') with open(modules, "a") as modules_file: modules_file.write("\n") - for module in info.manifest.system.get('virtio', []): + for module in info.manifest.provider.get('virtio', []): modules_file.write(module + "\n") diff --git a/bootstrapvz/providers/virtualbox/README.rst b/bootstrapvz/providers/virtualbox/README.rst new file mode 100644 index 0000000..37300ff --- /dev/null +++ b/bootstrapvz/providers/virtualbox/README.rst @@ -0,0 +1,12 @@ +VirtualBox +---------- + +The `VirtualBox `__ provider can bootstrap +to both .vdi and .vmdk images (raw images are also supported but do not +run in VirtualBox). It's advisable to always use vmdk images for +interoperability (e.g. 
+`OVF `__ files +*should* support vdi files, but since they have no identifier URL not +even VirtualBox itself can import them). VirtualBox Guest Additions can +be installed automatically if the ISO is `provided in the +manifest <../../../manifests#bootstrapper>`__. diff --git a/bootstrapvz/providers/virtualbox/__init__.py b/bootstrapvz/providers/virtualbox/__init__.py index 0ba1c1e..83411f8 100644 --- a/bootstrapvz/providers/virtualbox/__init__.py +++ b/bootstrapvz/providers/virtualbox/__init__.py @@ -9,7 +9,7 @@ def initialize(): def validate_manifest(data, validator, error): import os.path - schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json')) + schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.yml')) validator(data, schema_path) @@ -17,11 +17,12 @@ def resolve_tasks(taskset, manifest): taskset.update(task_groups.get_standard_groups(manifest)) taskset.update([tasks.packages.DefaultPackages, + loopback.AddRequiredCommands, loopback.Create, loopback.MoveImage, ]) - if manifest.bootstrapper.get('guest_additions', False): + if manifest.provider.get('guest_additions', False): from tasks import guest_additions taskset.update([guest_additions.CheckGuestAdditionsPath, guest_additions.AddGuestAdditionsPackages, diff --git a/bootstrapvz/providers/virtualbox/assets/install_guest_additions.sh b/bootstrapvz/providers/virtualbox/assets/install_guest_additions.sh new file mode 100644 index 0000000..3774c5d --- /dev/null +++ b/bootstrapvz/providers/virtualbox/assets/install_guest_additions.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +function uname { + if [[ $1 == '-r' ]]; then + echo "KERNEL_VERSION" + return 0 + elif [[ $1 == '-m' ]]; then + echo "KERNEL_ARCH" + return 0 + else + $(which uname) $@ + fi +} +export -f uname + +INSTALL_SCRIPT --nox11 diff --git a/bootstrapvz/providers/virtualbox/manifest-schema.json b/bootstrapvz/providers/virtualbox/manifest-schema.json deleted file mode 100644 index faefa6f..0000000 --- a/bootstrapvz/providers/virtualbox/manifest-schema.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "VirtualBox manifest", - "type": "object", - "properties": { - "bootstrapper": { - "type": "object", - "properties": { - "guest_additions": { - "type": "string" - } - } - }, - "system": { - "type": "object", - "properties": { - "bootloader": { - "type": "string", - "enum": ["grub", "extlinux"] - } - } - }, - "volume": { - "type": "object", - "properties": { - "backing": { - "type": "string", - "enum": ["raw", "vdi", "vmdk"] - }, - "partitions": { - "type": "object", - "properties": { - "type": { "enum": ["none", "msdos", "gpt"] } - } - } - }, - "required": ["backing"] - } - } -} diff --git a/bootstrapvz/providers/virtualbox/manifest-schema.yml b/bootstrapvz/providers/virtualbox/manifest-schema.yml new file mode 100644 index 0000000..1141e66 --- /dev/null +++ b/bootstrapvz/providers/virtualbox/manifest-schema.yml @@ -0,0 +1,36 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: VirtualBox manifest +type: object +properties: + bootstrapper: + type: object + properties: + guest_additions: {type: string} + system: + type: object + properties: + bootloader: + type: string + enum: + - grub + - extlinux + volume: + type: object + properties: + backing: + type: string + enum: + - raw + - vdi + - vmdk + partitions: + type: object + properties: + type: + type: string + enum: + - none + - msdos + - gpt + required: [backing] diff --git 
a/bootstrapvz/providers/virtualbox/tasks/guest_additions.py b/bootstrapvz/providers/virtualbox/tasks/guest_additions.py index e9a6257..350ce4a 100644 --- a/bootstrapvz/providers/virtualbox/tasks/guest_additions.py +++ b/bootstrapvz/providers/virtualbox/tasks/guest_additions.py @@ -2,6 +2,9 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases from bootstrapvz.common.tasks.packages import InstallPackages from bootstrapvz.common.exceptions import TaskError +import os + +assets = os.path.normpath(os.path.join(os.path.dirname(__file__), '../assets')) class CheckGuestAdditionsPath(Task): @@ -10,8 +13,7 @@ class CheckGuestAdditionsPath(Task): @classmethod def run(cls, info): - import os.path - guest_additions_path = info.manifest.bootstrapper['guest_additions'] + guest_additions_path = info.manifest.provider['guest_additions'] if not os.path.exists(guest_additions_path): msg = 'The file {file} does not exist.'.format(file=guest_additions_path) raise TaskError(msg) @@ -28,11 +30,18 @@ class AddGuestAdditionsPackages(Task): info.packages.add('build-essential') info.packages.add('dkms') - from bootstrapvz.common.tools import log_check_call - [kernel_version] = log_check_call(['chroot', info.root, - 'uname', '-r']) - kernel_headers_pkg = 'linux-headers-' + kernel_version + kernel_headers_pkg = 'linux-headers-' + if info.manifest.system['architecture'] == 'i386': + arch = 'i686' + kernel_headers_pkg += '686-pae' + else: + arch = 'x86_64' + kernel_headers_pkg += 'amd64' info.packages.add(kernel_headers_pkg) + info.kernel = { + 'arch': arch, + 'headers_pkg': kernel_headers_pkg, + } class InstallGuestAdditions(Task): @@ -42,19 +51,35 @@ class InstallGuestAdditions(Task): @classmethod def run(cls, info): - import os - guest_additions_path = info.manifest.bootstrapper['guest_additions'] + from bootstrapvz.common.tools import log_call, log_check_call + for line in log_check_call(['chroot', info.root, 'apt-cache', 'show', info.kernel['headers_pkg']]): + key, value = line.split(':') + if key.strip() == 'Depends': + kernel_version = value.strip().split('linux-headers-')[-1] + break + + guest_additions_path = info.manifest.provider['guest_additions'] mount_dir = 'mnt/guest_additions' mount_path = os.path.join(info.root, mount_dir) os.mkdir(mount_path) root = info.volume.partition_map.root root.add_mount(guest_additions_path, mount_path, ['-o', 'loop']) - install_script = os.path.join('/', mount_dir, 'VBoxLinuxAdditions.run') + install_wrapper_name = 'install_guest_additions.sh' + install_wrapper = open(os.path.join(assets, install_wrapper_name)) \ + .read() \ + .replace("KERNEL_VERSION", kernel_version) \ + .replace("KERNEL_ARCH", info.kernel['arch']) \ + .replace("INSTALL_SCRIPT", install_script) + install_wrapper_path = os.path.join(info.root, install_wrapper_name) + with open(install_wrapper_path, 'w') as f: + f.write(install_wrapper + '\n') + # Don't check the return code of the scripts here, because 1 not necessarily means they have failed - from bootstrapvz.common.tools import log_call - log_call(['chroot', info.root, install_script, '--nox11']) + log_call(['chroot', info.root, 'bash', '/' + install_wrapper_name]) + # VBoxService process could be running, as it is not affected by DisableDaemonAutostart log_call(['chroot', info.root, 'service', 'vboxadd-service', 'stop']) root.remove_mount(mount_path) os.rmdir(mount_path) + os.remove(install_wrapper_path) diff --git a/bootstrapvz/providers/virtualbox/tasks/packages-kernels.yml 
b/bootstrapvz/providers/virtualbox/tasks/packages-kernels.yml new file mode 100644 index 0000000..1279faf --- /dev/null +++ b/bootstrapvz/providers/virtualbox/tasks/packages-kernels.yml @@ -0,0 +1,14 @@ +--- +# This is a mapping of Debian release codenames to processor architectures to kernel packages +squeeze: + amd64: linux-image-amd64 + i386: linux-image-686 +wheezy: + amd64: linux-image-amd64 + i386: linux-image-686 +jessie: + amd64: linux-image-amd64 + i386: linux-image-686-pae +sid: + amd64: linux-image-amd64 + i386: linux-image-686-pae diff --git a/bootstrapvz/providers/virtualbox/tasks/packages.py b/bootstrapvz/providers/virtualbox/tasks/packages.py index 8235c32..8bae0b8 100644 --- a/bootstrapvz/providers/virtualbox/tasks/packages.py +++ b/bootstrapvz/providers/virtualbox/tasks/packages.py @@ -1,15 +1,16 @@ from bootstrapvz.base import Task from bootstrapvz.common import phases -from bootstrapvz.common.tasks import apt class DefaultPackages(Task): description = 'Adding image packages required for virtualbox' phase = phases.preparation - predecessors = [apt.AddDefaultSources] @classmethod def run(cls, info): - kernels = {'amd64': 'linux-image-amd64', - 'i386': 'linux-image-686', } - info.packages.add(kernels.get(info.manifest.system['architecture'])) + import os.path + kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.yml') + from bootstrapvz.common.tools import config_get + kernel_package = config_get(kernel_packages_path, [info.manifest.release.codename, + info.manifest.system['architecture']]) + info.packages.add(kernel_package) diff --git a/bootstrapvz/remote/README.rst b/bootstrapvz/remote/README.rst new file mode 100644 index 0000000..69335c7 --- /dev/null +++ b/bootstrapvz/remote/README.rst @@ -0,0 +1,171 @@ +Remote bootstrapping +==================== + +bootstrap-vz is able to bootstrap images not only on the machine +on which it is invoked, but also on remote machines that have bootstrap-vz +installed. + +This is helpful when you create manifests on your own workstation, but have a +beefed-up remote build server which can create images quickly. +There may also be situations where you want to build multiple manifests that +have different providers and require the host machines to be running on +that provider (e.g. EBS backed AMIs can only be created on EC2 instances). +Doing this multiple times, SSHing into the machines and copying the +manifests, can be a hassle. + +Lastly, the main motivation for supporting remote bootstrapping is the +automation of `integration testing <../../tests/integration>`__. +As you will see `further down <#bootstrap-vz-remote>`__, +bootstrap-vz is able to select which build server is required +for a specific test and run the bootstrapping procedure on said server. + + +bootstrap-vz-remote +------------------- +Normally you'd use ``bootstrap-vz`` to start a bootstrapping process. +When bootstrapping remotely, simply use ``bootstrap-vz-remote`` instead; +it takes the same arguments plus a few additional ones: + +* ``--servers <path>``: Path to a list of build-servers + (see `build-servers.yml <#build-servers-yml>`__ for more info) +* ``--name <name>``: Selects a specific build-server from the list + of build-servers +* ``--release <release>``: Restricts the autoselection of build-servers + to the ones with the specified release + +Much like when bootstrapping directly, you can press ``Ctrl+C`` at any time +to abort the bootstrapping process.
+The remote process will receive the keyboard interrupt signal +and begin cleaning up - pressing ``Ctrl+C`` a second time will abort that as +well and kill the connection immediately. + +Note that there is also a ``bootstrap-vz-server``; this file is not meant to be +invoked directly by the user, but is instead launched by bootstrap-vz on the +remote server when connecting to it. + + +Dependencies +------------ +For the remote bootstrapping procedure to work, you will need to install +bootstrap-vz as well as the ``sudo`` command on the remote machine. +Also make sure that all the needed dependencies for bootstrapping your image +are installed. + +Locally, the pip package `Pyro4`__ is needed. + +__ https://pypi.python.org/pypi/Pyro4 + + + +build-servers.yml +----------------- +The file ``build-servers.yml`` informs bootstrap-vz about the different +build servers you have at your disposal. +In its simplest form you can just add your own machine like this: + +.. code:: yaml + + local: + type: local + can_bootstrap: [virtualbox] + release: jessie + build_settings: {} + +``type`` specifies how bootstrap-vz should connect to the build-server. +``local`` simply means that it will call the bootstrapping procedure directly; +no new process is spawned. + +``can_bootstrap`` tells bootstrap-vz for which providers this machine is capable +of building images. With the exception of the EC2 provider, +the accepted values match the accepted provider names in the manifest. +For EC2 you can specify ``ec2-s3`` and/or ``ec2-ebs``. +``ec2-ebs`` specifies that the machine in question can bootstrap EBS backed +images and should only be used when it is located on EC2. +``ec2-s3`` signifies that the machine is capable of bootstrapping S3 backed +images. + +Beyond being a string, the value of ``release`` is not enforced in any way. +Its only current use is for ``bootstrap-vz-remote`` where you can restrict +which build-server should be autoselected. + + +Remote settings +~~~~~~~~~~~~~~~ +The other (and more interesting) setting for ``type`` is ``ssh``, +which requires a few more configuration settings: + +.. code:: yaml + + local_vm: + type: ssh + can_bootstrap: + - virtualbox + - ec2-s3 + release: wheezy + # remote settings below here + address: 127.0.0.1 + port: 2222 + username: admin + keyfile: path_to_private_key_file + server_bin: /root/bootstrap/bootstrap-vz-server + + +The last 5 settings specify how bootstrap-vz can connect +to the remote build-server. +While the initial handshake is achieved through SSH, bootstrap-vz mainly +communicates with its counterpart through RPC (the communication port is +automatically forwarded through an SSH tunnel). +``address``, ``port``, ``username`` and ``keyfile`` are hopefully +self-explanatory (remote machine address, SSH port, login name and path to +private SSH key file). + +``server_bin`` refers to the `above mentioned <#bootstrap-vz-remote>`__ +bootstrap-vz-server executable. This is the command bootstrap-vz executes +on the remote machine to start the RPC server.
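A minimal invocation sketch tying the above settings together (the manifest path ``manifests/my-image.yml`` is purely illustrative; only ``--servers``, ``--name`` and the positional manifest argument come from the interface described above):

.. code:: console

    # Illustrative example: build-servers.yml contains the `local_vm' entry
    # shown above and manifests/my-image.yml is a manifest of your own.
    $ bootstrap-vz-remote --servers build-servers.yml --name local_vm manifests/my-image.yml

bootstrap-vz-remote reads ``build-servers.yml``, selects the ``local_vm`` entry, connects over SSH with the configured key and port, and runs the bootstrapping procedure for the given manifest on that server.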
+ +Be aware that there are a few limitations as to what bootstrap-vz is able to +deal with, regarding the remote machine setup (in time they may be fixed +by a benevolent contributor): + +* The login user must be able to execute sudo without a password +* The private key file must be added to the ssh-agent before invocation + (alternatively it may not be password protected) +* The server must already be part of the known_hosts list + (bootstrap-vz uses ``ssh`` directly and cannot handle interactive prompts) + + +Build settings +~~~~~~~~~~~~~~ +The build settings allow you to override specific manifest properties. +This is useful when, for example, the VirtualBox guest additions ISO is located +at ``/root/guest_additions.iso`` on server 1, while server 2 has it at +``/root/images/vbox.iso``. + +.. code:: yaml + + local: + type: local + can_bootstrap: + - virtualbox + - ec2-s3 + release: jessie + build_settings: + guest_additions: /root/images/VBoxGuestAdditions.iso + apt_proxy: + address: 127.0.0.1 + port: 3142 + ec2-credentials: + access-key: AFAKEACCESSKEYFORAWS + secret-key: thes3cr3tkeyf0ryourawsaccount/FS4d8Qdva + certificate: /root/manifests/cert.pem + private-key: /root/manifests/pk.pem + user-id: 1234-1234-1234 + s3-region: eu-west-1 + +* ``guest_additions`` specifies the path to the VirtualBox guest additions ISO + on the remote machine. +* ``apt_proxy`` sets the configuration for the `apt_proxy plugin <../plugins/apt_proxy>`__. +* ``ec2-credentials`` contains all the settings you know from EC2 manifests; + note that when running `integration tests <../../tests/integration>`__, + these credentials are also used when running instances. +* ``s3-region`` overrides the s3 bucket region when bootstrapping S3 backed images. diff --git a/bootstrapvz/remote/__init__.py b/bootstrapvz/remote/__init__.py new file mode 100644 index 0000000..35303e2 --- /dev/null +++ b/bootstrapvz/remote/__init__.py @@ -0,0 +1,108 @@ +"""Remote module containing methods to bootstrap remotely +""" +from Pyro4.util import SerializerBase +import logging +log = logging.getLogger(__name__) + +supported_classes = ['bootstrapvz.base.manifest.Manifest', + 'bootstrapvz.base.bootstrapinfo.BootstrapInformation', + 'bootstrapvz.base.bootstrapinfo.DictClass', + 'bootstrapvz.common.fs.loopbackvolume.LoopbackVolume', + 'bootstrapvz.common.fs.qemuvolume.QEMUVolume', + 'bootstrapvz.common.fs.virtualdiskimage.VirtualDiskImage', + 'bootstrapvz.common.fs.virtualmachinedisk.VirtualMachineDisk', + 'bootstrapvz.base.fs.partitionmaps.gpt.GPTPartitionMap', + 'bootstrapvz.base.fs.partitionmaps.msdos.MSDOSPartitionMap', + 'bootstrapvz.base.fs.partitionmaps.none.NoPartitions', + 'bootstrapvz.base.fs.partitions.mount.Mount', + 'bootstrapvz.base.fs.partitions.gpt.GPTPartition', + 'bootstrapvz.base.fs.partitions.gpt_swap.GPTSwapPartition', + 'bootstrapvz.base.fs.partitions.msdos.MSDOSPartition', + 'bootstrapvz.base.fs.partitions.msdos_swap.MSDOSSwapPartition', + 'bootstrapvz.base.fs.partitions.single.SinglePartition', + 'bootstrapvz.base.fs.partitions.unformatted.UnformattedPartition', + 'bootstrapvz.common.bytes.Bytes', + 'bootstrapvz.common.sectors.Sectors', + ] + +supported_exceptions = ['bootstrapvz.common.exceptions.ManifestError', + 'bootstrapvz.common.exceptions.TaskListError', + 'bootstrapvz.common.exceptions.TaskError', + 'bootstrapvz.base.fs.exceptions.VolumeError', + 'bootstrapvz.base.fs.exceptions.PartitionError', + 'bootstrapvz.base.pkg.exceptions.PackageError', + 'bootstrapvz.base.pkg.exceptions.SourceError', +
'bootstrapvz.common.exceptions.UnitError', + 'bootstrapvz.common.fsm_proxy.FSMProxyError', + 'subprocess.CalledProcessError', + ] + + +def register_deserialization_handlers(): + for supported_class in supported_classes: + SerializerBase.register_dict_to_class(supported_class, deserialize) + for supported_exc in supported_exceptions: + SerializerBase.register_dict_to_class(supported_exc, deserialize_exception) + import subprocess + SerializerBase.register_class_to_dict(subprocess.CalledProcessError, serialize_called_process_error) + + +def unregister_deserialization_handlers(): + for supported_class in supported_classes: + SerializerBase.unregister_dict_to_class(supported_class, deserialize) + for supported_exc in supported_exceptions: + SerializerBase.unregister_dict_to_class(supported_exc, deserialize_exception) + + +def deserialize_exception(fq_classname, data): + class_object = get_class_object(fq_classname) + return SerializerBase.make_exception(class_object, data) + + +def deserialize(fq_classname, data): + class_object = get_class_object(fq_classname) + from Pyro4.util import SerpentSerializer + from Pyro4.errors import SecurityError + ser = SerpentSerializer() + state = {} + for key, value in data.items(): + try: + state[key] = ser.recreate_classes(value) + except SecurityError as e: + msg = 'Unable to deserialize key `{key}\' on {class_name}'.format(key=key, class_name=fq_classname) + raise Exception(msg, e) + + instance = class_object.__new__(class_object) + instance.__setstate__(state) + return instance + + +def serialize_called_process_error(obj): + # This is by far the weirdest exception serialization. + # There is a bug in both Pyro4 and the Python subprocess module. + # CalledProcessError does not populate its args property, + # although according to https://docs.python.org/2/library/exceptions.html#exceptions.BaseException.args + # it should... + # So we populate that property during serialization instead + # (the code is grabbed directly from Pyro4's class_to_dict()) + # However, Pyro4 still cannot figure out to call the deserializer + # unless we also use setattr() on the exception to set the args below + # (before throwing it). + # Mind you, the error "__init__() takes at least 3 arguments (2 given)" + # is thrown *on the server* if we don't use setattr(). + # It's all very confusing to me and I'm not entirely + # sure what the exact problem is. Regardless - it works, so there. + return {'__class__': obj.__class__.__module__ + '.' 
+ obj.__class__.__name__, + '__exception__': True, + 'args': (obj.returncode, obj.cmd, obj.output), + 'attributes': vars(obj) # add custom exception attributes + } + + +def get_class_object(fq_classname): + parts = fq_classname.split('.') + module_name = '.'.join(parts[:-1]) + class_name = parts[-1] + import importlib + imported_module = importlib.import_module(module_name) + return getattr(imported_module, class_name) diff --git a/bootstrapvz/remote/build_servers/__init__.py b/bootstrapvz/remote/build_servers/__init__.py new file mode 100644 index 0000000..f6da332 --- /dev/null +++ b/bootstrapvz/remote/build_servers/__init__.py @@ -0,0 +1,46 @@ + + +def pick_build_server(build_servers, manifest, preferences={}): + # Validate the build servers list + from bootstrapvz.common.tools import load_data + import os.path + schema = load_data(os.path.normpath(os.path.join(os.path.dirname(__file__), 'build-servers-schema.yml'))) + import jsonschema + jsonschema.validate(build_servers, schema) + + if manifest['provider']['name'] == 'ec2': + must_bootstrap = 'ec2-' + manifest['volume']['backing'] + else: + must_bootstrap = manifest['provider']['name'] + + def matches(name, settings): + if preferences.get('name', name) != name: + return False + if preferences.get('release', settings['release']) != settings['release']: + return False + if must_bootstrap not in settings['can_bootstrap']: + return False + return True + + for name, settings in build_servers.iteritems(): + if not matches(name, settings): + continue + if settings['type'] == 'local': + from local import LocalBuildServer + return LocalBuildServer(name, settings) + else: + from remote import RemoteBuildServer + return RemoteBuildServer(name, settings) + raise Exception('Unable to find a build server that matches your preferences.') + + +def getNPorts(n, port_range=(1024, 65535)): + import random + ports = [] + for i in range(0, n): + while True: + port = random.randrange(*port_range) + if port not in ports: + ports.append(port) + break + return ports diff --git a/bootstrapvz/remote/build_servers/build-servers-schema.yml b/bootstrapvz/remote/build_servers/build-servers-schema.yml new file mode 100644 index 0000000..077ee50 --- /dev/null +++ b/bootstrapvz/remote/build_servers/build-servers-schema.yml @@ -0,0 +1,67 @@ +--- +$schema: http://json-schema.org/draft-04/schema# +title: Build server settings list +type: object +properties: + local: + type: object + properties: + type: {enum: [local]} + can_bootstrap: {$ref: '#/definitions/can_bootstrap'} + release: {type: string} + build_settings: {$ref: '#/definitions/build_settings'} + required: [type, can_bootstrap, release] +patternProperties: + ^(?!local).*$: {$ref: '#/definitions/ssh'} + +definitions: + absolute_path: + type: string + pattern: ^/[^\0]+$ + + can_bootstrap: + type: array + items: + enum: + - virtualbox + - ec2-ebs + - ec2-s3 + + build_settings: + type: object + properties: + guest_additions: {$ref: '#/definitions/absolute_path'} + ec2-credentials: + required: [access-key, secret-key] + type: object + properties: + access-key: {type: string} + secret-key: {type: string} + certificate: {type: string} + private-key: {type: string} + user-id: + type: string + pattern: (^arn:aws:iam::\d*:user/\w.*$)|(^\d{4}-\d{4}-\d{4}$) + additional_properties: false + apt_proxy: + type: object + properties: + address: {type: string} + port: {type: integer} + persistent: {type: boolean} + required: [address, port] + + ssh: + type: object + properties: + type: {enum: [ssh]} + can_bootstrap: {$ref: 
'#/definitions/can_bootstrap'} + build_settings: {$ref: '#/definitions/build_settings'} + release: {type: string} + address: {type: string} + port: {type: integer} + username: {type: string} + password: {type: string} + keyfile: {$ref: '#/definitions/absolute_path'} + server_bin: {$ref: '#/definitions/absolute_path'} + required: [type, can_bootstrap, release] diff --git a/bootstrapvz/remote/build_servers/build_server.py b/bootstrapvz/remote/build_servers/build_server.py new file mode 100644 index 0000000..0cdf6a3 --- /dev/null +++ b/bootstrapvz/remote/build_servers/build_server.py @@ -0,0 +1,26 @@ + + +class BuildServer(object): + + def __init__(self, name, settings): + self.name = name + self.settings = settings + self.build_settings = settings.get('build_settings', {}) + self.can_bootstrap = settings['can_bootstrap'] + self.release = settings.get('release', None) + + def apply_build_settings(self, manifest_data): + if manifest_data['provider']['name'] == 'virtualbox' and 'guest_additions' in manifest_data['provider']: + manifest_data['provider']['guest_additions'] = self.build_settings['guest_additions'] + if 'apt_proxy' in self.build_settings: + manifest_data.get('plugins', {})['apt_proxy'] = self.build_settings['apt_proxy'] + if 'ec2-credentials' in self.build_settings: + if 'credentials' not in manifest_data['provider']: + manifest_data['provider']['credentials'] = {} + for key in ['access-key', 'secret-key', 'certificate', 'private-key', 'user-id']: + if key in self.build_settings['ec2-credentials']: + manifest_data['provider']['credentials'][key] = self.build_settings['ec2-credentials'][key] + if 's3-region' in self.build_settings and manifest_data['volume']['backing'] == 's3': + if 'region' not in manifest_data['image']: + manifest_data['image']['region'] = self.build_settings['s3-region'] + return manifest_data diff --git a/bootstrapvz/remote/build_servers/callback.py b/bootstrapvz/remote/build_servers/callback.py new file mode 100644 index 0000000..2df5b82 --- /dev/null +++ b/bootstrapvz/remote/build_servers/callback.py @@ -0,0 +1,37 @@ +import Pyro4 +import logging + +Pyro4.config.REQUIRE_EXPOSE = True +log = logging.getLogger(__name__) + + +class CallbackServer(object): + + def __init__(self, listen_port, remote_port): + self.daemon = Pyro4.Daemon(host='localhost', port=listen_port, + nathost='localhost', natport=remote_port, + unixsocket=None) + self.daemon.register(self) + + def __enter__(self): + def serve(): + self.daemon.requestLoop() + from threading import Thread + self.thread = Thread(target=serve) + log.debug('Starting callback server') + self.thread.start() + return self + + def __exit__(self, type, value, traceback): + log.debug('Shutting down callback server') + self.daemon.shutdown() + self.thread.join() + + @Pyro4.expose + def handle_log(self, pickled_record): + import pickle + record = pickle.loads(pickled_record) + log = logging.getLogger() + record.extra = getattr(record, 'extra', {}) + record.extra['source'] = 'remote' + log.handle(record) diff --git a/bootstrapvz/remote/build_servers/local.py b/bootstrapvz/remote/build_servers/local.py new file mode 100644 index 0000000..0d29943 --- /dev/null +++ b/bootstrapvz/remote/build_servers/local.py @@ -0,0 +1,16 @@ +from build_server import BuildServer +from contextlib import contextmanager + + +class LocalBuildServer(BuildServer): + + @contextmanager + def connect(self): + yield LocalConnection() + + +class LocalConnection(object): + + def run(self, *args, **kwargs): + from bootstrapvz.base.main import run + return 
run(*args, **kwargs) diff --git a/bootstrapvz/remote/build_servers/remote.py b/bootstrapvz/remote/build_servers/remote.py new file mode 100644 index 0000000..2177550 --- /dev/null +++ b/bootstrapvz/remote/build_servers/remote.py @@ -0,0 +1,130 @@ +from build_server import BuildServer +from bootstrapvz.common.tools import log_check_call +from contextlib import contextmanager +import logging +log = logging.getLogger(__name__) + + +class RemoteBuildServer(BuildServer): + + def __init__(self, name, settings): + super(RemoteBuildServer, self).__init__(name, settings) + self.address = settings['address'] + self.port = settings['port'] + self.username = settings['username'] + self.password = settings.get('password', None) + self.keyfile = settings['keyfile'] + self.server_bin = settings['server_bin'] + + @contextmanager + def connect(self): + with self.spawn_server() as forwards: + args = {'listen_port': forwards['local_callback_port'], + 'remote_port': forwards['remote_callback_port']} + from callback import CallbackServer + with CallbackServer(**args) as callback_server: + with connect_pyro('localhost', forwards['local_server_port']) as connection: + connection.set_callback_server(callback_server) + yield connection + + @contextmanager + def spawn_server(self): + from . import getNPorts + # We can't use :0 for the forwarding ports because + # A: It's quite hard to retrieve the port on the remote after the daemon has started + # B: SSH doesn't accept 0:localhost:0 as a port forwarding option + [local_server_port, local_callback_port] = getNPorts(2) + [remote_server_port, remote_callback_port] = getNPorts(2) + + server_cmd = ['sudo', self.server_bin, '--listen', str(remote_server_port)] + + def set_process_group(): + # Changes the process group of a command so that any SIGINT + # for the main thread will not be propagated to it. + # We'd like to handle SIGINT ourselves (i.e. 
propagate the shutdown to the serverside) + import os + os.setpgrp() + + addr_arg = '{user}@{host}'.format(user=self.username, host=self.address) + ssh_cmd = ['ssh', '-i', self.keyfile, + '-p', str(self.port), + '-L' + str(local_server_port) + ':localhost:' + str(remote_server_port), + '-R' + str(remote_callback_port) + ':localhost:' + str(local_callback_port), + addr_arg] + full_cmd = ssh_cmd + ['--'] + server_cmd + + log.debug('Opening SSH connection to build server `{name}\''.format(name=self.name)) + import sys + import subprocess + ssh_process = subprocess.Popen(args=full_cmd, stdout=sys.stderr, stderr=sys.stderr, + preexec_fn=set_process_group) + try: + yield {'local_server_port': local_server_port, + 'local_callback_port': local_callback_port, + 'remote_server_port': remote_server_port, + 'remote_callback_port': remote_callback_port} + finally: + log.debug('Waiting for SSH connection to the build server to close') + import time + start = time.time() + while ssh_process.poll() is None: + if time.time() - start > 5: + log.debug('Forcefully terminating SSH connection to the build server') + ssh_process.terminate() + break + else: + time.sleep(0.5) + + def download(self, src, dst): + log.debug('Downloading file `{src}\' from ' + 'build server `{name}\' to `{dst}\'' + .format(src=src, dst=dst, name=self.name)) + # Make sure we can read the file as {user} + self._remote_command(['sudo', 'chown', self.username, src]) + src_arg = '{user}@{host}:{path}'.format(user=self.username, host=self.address, path=src) + log_check_call(['scp', '-i', self.keyfile, '-P', str(self.port), + src_arg, dst]) + + def delete(self, path): + log.debug('Deleting file `{path}\' on build server `{name}\''.format(path=path, name=self.name)) + self._remote_command(['sudo', 'rm', path]) + + def _remote_command(self, command): + ssh_cmd = ['ssh', '-i', self.keyfile, + '-p', str(self.port), + self.username + '@' + self.address, + '--'] + command + log_check_call(ssh_cmd) + + +@contextmanager +def connect_pyro(host, port): + import Pyro4 + server_uri = 'PYRO:server@{host}:{port}'.format(host=host, port=port) + connection = Pyro4.Proxy(server_uri) + + log.debug('Connecting to RPC daemon') + + connected = False + try: + remaining_retries = 5 + while not connected: + try: + connection.ping() + connected = True + except (Pyro4.errors.ConnectionClosedError, Pyro4.errors.CommunicationError): + if remaining_retries > 0: + remaining_retries -= 1 + from time import sleep + sleep(2) + else: + raise + + yield connection + finally: + if connected: + log.debug('Stopping RPC daemon') + connection.stop() + connection._pyroRelease() + else: + log.warn('Unable to stop RPC daemon, it might still be running on the server') diff --git a/bootstrapvz/remote/log.py b/bootstrapvz/remote/log.py new file mode 100644 index 0000000..fc7d66d --- /dev/null +++ b/bootstrapvz/remote/log.py @@ -0,0 +1,23 @@ +import logging + + +class LogForwarder(logging.Handler): + + def __init__(self, level=logging.NOTSET): + self.server = None + super(LogForwarder, self).__init__(level) + + def set_server(self, server): + self.server = server + + def emit(self, record): + if self.server is not None: + if record.exc_info is not None: + import traceback + exc_type, exc_value, exc_traceback = record.exc_info + record.extra = getattr(record, 'extra', {}) + record.extra['traceback'] = traceback.format_exception(exc_type, exc_value, exc_traceback) + record.exc_info = None + # TODO: Use serpent instead + import pickle + self.server.handle_log(pickle.dumps(record)) diff --git 
a/bootstrapvz/remote/main.py b/bootstrapvz/remote/main.py new file mode 100644 index 0000000..9613088 --- /dev/null +++ b/bootstrapvz/remote/main.py @@ -0,0 +1,70 @@ +"""Main module containing all the setup necessary for running the remote bootstrapping process +""" + + +def main(): + """Main function for invoking the bootstrap process remotely + """ + # Get the commandline arguments + opts = get_opts() + + from bootstrapvz.common.tools import load_data + # load the manifest data, we might want to modify it later on + manifest_data = load_data(opts['MANIFEST']) + + # load the build servers file + build_servers = load_data(opts['--servers']) + # Pick a build server + from build_servers import pick_build_server + preferences = {} + if opts['--name'] is not None: + preferences['name'] = opts['--name'] + if opts['--release'] is not None: + preferences['release'] = opts['--release'] + build_server = pick_build_server(build_servers, manifest_data, preferences) + + # Apply the build server settings to the manifest (e.g. the virtualbox guest additions path) + manifest_data = build_server.apply_build_settings(manifest_data) + + # Load the manifest + from bootstrapvz.base.manifest import Manifest + manifest = Manifest(path=opts['MANIFEST'], data=manifest_data) + + # Set up logging + from bootstrapvz.base.main import setup_loggers + setup_loggers(opts) + + # Register deserialization handlers for objects + # that will pass between server and client + from . import register_deserialization_handlers + register_deserialization_handlers() + + # Everything has been set up, connect to the server and begin the bootstrapping process + with build_server.connect() as connection: + connection.run(manifest, + debug=opts['--debug'], + dry_run=opts['--dry-run']) + + +def get_opts(): + """Creates an argument parser and returns the arguments it has parsed + """ + from docopt import docopt + usage = """bootstrap-vz-remote + +Usage: bootstrap-vz-remote [options] --servers= MANIFEST + +Options: + --servers Path to list of build servers + --name Selects specific server from the build servers list + --release Require the build server OS to be a specific release + --log Log to given directory [default: /var/log/bootstrap-vz] + If is `-' file logging will be disabled. + --pause-on-error Pause on error, before rollback + --dry-run Don't actually run the tasks + --color=auto|always|never + Colorize the console output [default: auto] + --debug Print debugging information + -h, --help show this help + """ + return docopt(usage) diff --git a/bootstrapvz/remote/server.py b/bootstrapvz/remote/server.py new file mode 100644 index 0000000..bf37cbf --- /dev/null +++ b/bootstrapvz/remote/server.py @@ -0,0 +1,130 @@ +import Pyro4 +import logging + +Pyro4.config.REQUIRE_EXPOSE = True +log = logging.getLogger(__name__) + + +def main(): + opts = getopts() + from . 
import register_deserialization_handlers + register_deserialization_handlers() + log_forwarder = setup_logging() + server = Server(opts['--listen'], log_forwarder) + server.start() + + +def setup_logging(): + root = logging.getLogger() + root.setLevel(logging.NOTSET) + + from log import LogForwarder + log_forwarder = LogForwarder() + root.addHandler(log_forwarder) + + from datetime import datetime + import os.path + from bootstrapvz.base.log import get_file_handler + timestamp = datetime.now().strftime('%Y%m%d%H%M%S') + filename = '{timestamp}_remote.log'.format(timestamp=timestamp) + logfile_path = os.path.join('/var/log/bootstrap-vz', filename) + file_handler = get_file_handler(logfile_path, True) + root.addHandler(file_handler) + + return log_forwarder + + +def getopts(): + from docopt import docopt + usage = """bootstrap-vz-server + +Usage: bootstrap-vz-server [options] + +Options: + --listen Serve on specified port [default: 46675] + -h, --help show this help +""" + return docopt(usage) + + +class Server(object): + + def __init__(self, listen_port, log_forwarder): + self.stop_serving = False + self.log_forwarder = log_forwarder + self.listen_port = listen_port + + def start(self): + Pyro4.config.COMMTIMEOUT = 0.5 + daemon = Pyro4.Daemon('localhost', port=int(self.listen_port), unixsocket=None) + daemon.register(self, 'server') + + daemon.requestLoop(loopCondition=lambda: not self.stop_serving) + + @Pyro4.expose + def set_callback_server(self, server): + log.debug('Forwarding logs to the callback server') + self.log_forwarder.set_server(server) + + @Pyro4.expose + def ping(self): + if hasattr(self, 'connection_timeout'): + self.connection_timeout.cancel() + del self.connection_timeout + return 'pong' + + @Pyro4.expose + def stop(self): + if hasattr(self, 'bootstrap_process'): + log.warn('Sending SIGINT to bootstrapping process') + import os + import signal + os.killpg(self.bootstrap_process.pid, signal.SIGINT) + self.bootstrap_process.join() + + # We can't send a SIGINT to the server, + # for some reason the Pyro4 shutdowns are rather unclean, + # throwing exceptions and such. + self.stop_serving = True + + @Pyro4.expose + def run(self, manifest, debug=False, dry_run=False): + + def bootstrap(queue): + # setsid() creates a new session, making this process the group leader. + # We do that, so when the server calls killpg (kill process group) + # on us, it won't kill itself (this process was spawned from a + # thread under the server, meaning it's part of the same group). + # The process hierarchy looks like this: + # Pyro server (process - listening on a port) + # +- pool thread + # +- pool thread + # +- pool thread + # +- started thread (the one that got the "run()" call) + # L bootstrap() process (us) + # Calling setsid() also fixes another problem: + # SIGINTs sent to this process seem to be redirected + # to the process leader. Since there is a thread between + # us and the process leader, the signal will not be propagated + # (signals are not propagated to threads), this means that any + # subprocess we start (i.e. debootstrap) will not get a SIGINT. 
+ import os + os.setsid() + from bootstrapvz.base.main import run + try: + bootstrap_info = run(manifest, debug=debug, dry_run=dry_run) + queue.put(bootstrap_info) + except (Exception, KeyboardInterrupt) as e: + queue.put(e) + + from multiprocessing import Queue + from multiprocessing import Process + queue = Queue() + self.bootstrap_process = Process(target=bootstrap, args=(queue,)) + self.bootstrap_process.start() + self.bootstrap_process.join() + del self.bootstrap_process + result = queue.get() + if isinstance(result, Exception): + raise result + return result diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index 66d46ad..0000000 --- a/docs/Makefile +++ /dev/null @@ -1,177 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = _build - -# User-friendly check for sphinx-build -ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) -$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) -endif - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . - -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " xml to make Docutils-native XML files" - @echo " pseudoxml to make pseudoxml-XML files for display purposes" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 
- -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/bootstrap-vz.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/bootstrap-vz.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/bootstrap-vz" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/bootstrap-vz" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -latexpdfja: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through platex and dvipdfmx..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." 
- -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." - -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The XML files are in $(BUILDDIR)/xml." - -pseudoxml: - $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml - @echo - @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/docs/README.rst b/docs/README.rst new file mode 100644 index 0000000..08ab12b --- /dev/null +++ b/docs/README.rst @@ -0,0 +1,44 @@ +:orphan: + + +Documentation +============= +Both the end-user and the developer documentation are combined into a single sphinx +build (the two were previously split between github pages and sphinx). + + +Building +-------- +To build the documentation, simply run ``tox -e docs`` in the project root. +Serving the docs through http can be achieved by subsequently running +``(cd docs/_build/html; python -m SimpleHTTPServer 8080)`` and accessing them +on ``http://localhost:8080/``. + + +READMEs +------- +Many of the folders in the project have a README.rst which describes +the purpose of the contents in that folder. +These files are automatically included when building the documentation, +through use of the `include`__ directive. + +__ http://docutils.sourceforge.net/docs/ref/rst/directives.html#including-an-external-document-fragment + +Include files for the providers and plugins are autogenerated +through the sphinx conf.py script. + + +Links +----- +All links in rst files outside of ``docs/`` (but also ``docs/README.rst``) that +link to other rst files are relative and reference folder names when the link +would point at a README.rst otherwise. This is done to take advantage of the +github feature where README files are displayed when viewing their parent folder. +When accessing the ``manifests/`` folder, for example, the documentation for how +manifests work is displayed at the bottom. +When sphinx generates the documentation, these relative links are +automatically converted into links that work inside the generated +html pages instead. +If you are interested in how this works, take a look at the +link transformation module in ``docs/transform_github_links``.
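To make the linking convention above concrete, here is a hypothetical link as it would appear in a README outside of ``docs/`` (the sentence is invented; the target follows the folder-name convention used elsewhere in the project):

.. code:: rst

    .. Hypothetical example of a folder-name link in a README outside docs/:
    See the `apt_proxy plugin <../plugins/apt_proxy>`__ for the proxy settings.

On GitHub the link leads to the plugin folder, where its README.rst is rendered below the file listing; when sphinx builds the docs, the link transformation module rewrites it to point at the corresponding generated HTML page.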
diff --git a/docs/__init__.py b/docs/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docs/_static/.gitignore b/docs/_static/.gitignore new file mode 100644 index 0000000..4dd81db --- /dev/null +++ b/docs/_static/.gitignore @@ -0,0 +1 @@ +graph.json diff --git a/docs/_static/graph.json b/docs/_static/graph.json deleted file mode 100644 index 3caaa2b..0000000 --- a/docs/_static/graph.json +++ /dev/null @@ -1 +0,0 @@ -{"phases": [{"name": "Preparation", "description": "Initializing connections, fetching data etc."}, {"name": "Volume creation", "description": "Creating the volume to bootstrap onto"}, {"name": "Volume preparation", "description": "Formatting the bootstrap volume"}, {"name": "Volume mounting", "description": "Mounting bootstrap volume"}, {"name": "OS installation", "description": "Installing the operating system"}, {"name": "Package installation", "description": "Installing software"}, {"name": "System modification", "description": "Modifying configuration files, adding resources, etc."}, {"name": "System cleaning", "description": "Removing sensitive data, temporary files and other leftovers"}, {"name": "Volume unmounting", "description": "Unmounting the bootstrap volume"}, {"name": "Image registration", "description": "Uploading/Registering with the provider"}, {"name": "Cleaning", "description": "Removing temporary files"}], "modules": [{"name": "bootstrapvz.common.tasks.apt"}, {"name": "bootstrapvz.common.tasks.boot"}, {"name": "bootstrapvz.common.tasks.bootstrap"}, {"name": "bootstrapvz.common.tasks.cleanup"}, {"name": "bootstrapvz.common.tasks.development"}, {"name": "bootstrapvz.common.tasks.filesystem"}, {"name": "bootstrapvz.common.tasks.host"}, {"name": "bootstrapvz.common.tasks.initd"}, {"name": "bootstrapvz.common.tasks.locale"}, {"name": "bootstrapvz.common.tasks.loopback"}, {"name": "bootstrapvz.common.tasks.network"}, {"name": "bootstrapvz.common.tasks.packages"}, {"name": "bootstrapvz.common.tasks.partitioning"}, {"name": "bootstrapvz.common.tasks.security"}, {"name": "bootstrapvz.common.tasks.ssh"}, {"name": "bootstrapvz.common.tasks.volume"}, {"name": "bootstrapvz.common.tasks.workspace"}, {"name": "bootstrapvz.plugins.admin_user.tasks"}, {"name": "bootstrapvz.plugins.apt_proxy.tasks"}, {"name": "bootstrapvz.plugins.chef.tasks"}, {"name": "bootstrapvz.plugins.cloud_init.tasks"}, {"name": "bootstrapvz.plugins.image_commands.tasks"}, {"name": "bootstrapvz.plugins.minimize_size.tasks"}, {"name": "bootstrapvz.plugins.ntp.tasks"}, {"name": "bootstrapvz.plugins.opennebula.tasks"}, {"name": "bootstrapvz.plugins.prebootstrapped.tasks"}, {"name": "bootstrapvz.plugins.puppet.tasks"}, {"name": "bootstrapvz.plugins.root_password.tasks"}, {"name": "bootstrapvz.plugins.salt.tasks"}, {"name": "bootstrapvz.plugins.unattended_upgrades.tasks"}, {"name": "bootstrapvz.plugins.vagrant.tasks"}, {"name": "bootstrapvz.providers.azure.tasks.boot"}, {"name": "bootstrapvz.providers.azure.tasks.image"}, {"name": "bootstrapvz.providers.azure.tasks.packages"}, {"name": "bootstrapvz.providers.ec2.tasks.ami"}, {"name": "bootstrapvz.providers.ec2.tasks.boot"}, {"name": "bootstrapvz.providers.ec2.tasks.connection"}, {"name": "bootstrapvz.providers.ec2.tasks.ebs"}, {"name": "bootstrapvz.providers.ec2.tasks.filesystem"}, {"name": "bootstrapvz.providers.ec2.tasks.host"}, {"name": "bootstrapvz.providers.ec2.tasks.initd"}, {"name": "bootstrapvz.providers.ec2.tasks.network"}, {"name": "bootstrapvz.providers.ec2.tasks.packages"}, {"name": "bootstrapvz.providers.gce.tasks.apt"}, 
{"name": "bootstrapvz.providers.gce.tasks.boot"}, {"name": "bootstrapvz.providers.gce.tasks.configuration"}, {"name": "bootstrapvz.providers.gce.tasks.host"}, {"name": "bootstrapvz.providers.gce.tasks.image"}, {"name": "bootstrapvz.providers.gce.tasks.packages"}, {"name": "bootstrapvz.providers.kvm.tasks.packages"}, {"name": "bootstrapvz.providers.kvm.tasks.virtio"}, {"name": "bootstrapvz.providers.virtualbox.tasks.guest_additions"}, {"name": "bootstrapvz.providers.virtualbox.tasks.packages"}], "nodes": [{"phase": 0, "name": "AddDefaultSources", "module": 0}, {"phase": 0, "name": "AddManifestPreferences", "module": 0}, {"phase": 0, "name": "AddManifestSources", "module": 0}, {"phase": 7, "name": "AptClean", "module": 0}, {"phase": 5, "name": "AptUpdate", "module": 0}, {"phase": 5, "name": "AptUpgrade", "module": 0}, {"phase": 5, "name": "DisableDaemonAutostart", "module": 0}, {"phase": 7, "name": "EnableDaemonAutostart", "module": 0}, {"phase": 5, "name": "InstallTrustedKeys", "module": 0}, {"phase": 7, "name": "PurgeUnusedPackages", "module": 0}, {"phase": 5, "name": "WritePreferences", "module": 0}, {"phase": 5, "name": "WriteSources", "module": 0}, {"phase": 0, "name": "AddExtlinuxPackage", "module": 1}, {"phase": 0, "name": "AddGrubPackage", "module": 1}, {"phase": 6, "name": "BlackListModules", "module": 1}, {"phase": 6, "name": "ConfigureGrub", "module": 1}, {"phase": 6, "name": "DisableGetTTYs", "module": 1}, {"phase": 6, "name": "InstallExtLinux", "module": 1}, {"phase": 6, "name": "InstallGrub", "module": 1}, {"phase": 0, "name": "AddRequiredCommands", "module": 2}, {"phase": 4, "name": "Bootstrap", "module": 2}, {"phase": 0, "name": "ExcludePackagesInBootstrap", "module": 2}, {"phase": 0, "name": "IncludePackagesInBootstrap", "module": 2}, {"phase": 4, "name": "MakeTarball", "module": 2}, {"phase": 7, "name": "CleanTMP", "module": 3}, {"phase": 7, "name": "ClearMOTD", "module": 3}, {"phase": 10, "name": "TriggerRollback", "module": 4}, {"phase": 0, "name": "AddRequiredCommands", "module": 5}, {"phase": 0, "name": "AddXFSProgs", "module": 5}, {"phase": 3, "name": "CreateBootMountDir", "module": 5}, {"phase": 3, "name": "CreateMountDir", "module": 5}, {"phase": 8, "name": "DeleteMountDir", "module": 5}, {"phase": 6, "name": "FStab", "module": 5}, {"phase": 2, "name": "Format", "module": 5}, {"phase": 3, "name": "MountBoot", "module": 5}, {"phase": 3, "name": "MountRoot", "module": 5}, {"phase": 4, "name": "MountSpecials", "module": 5}, {"phase": 2, "name": "TuneVolumeFS", "module": 5}, {"phase": 8, "name": "UnmountRoot", "module": 5}, {"phase": 0, "name": "CheckExternalCommands", "module": 6}, {"phase": 6, "name": "AddExpandRoot", "module": 7}, {"phase": 6, "name": "AdjustExpandRootScript", "module": 7}, {"phase": 6, "name": "InstallInitScripts", "module": 7}, {"phase": 6, "name": "RemoveHWClock", "module": 7}, {"phase": 5, "name": "GenerateLocale", "module": 8}, {"phase": 0, "name": "LocaleBootstrapPackage", "module": 8}, {"phase": 6, "name": "SetTimezone", "module": 8}, {"phase": 0, "name": "AddRequiredCommands", "module": 9}, {"phase": 1, "name": "Create", "module": 9}, {"phase": 9, "name": "MoveImage", "module": 9}, {"phase": 6, "name": "ConfigureNetworkIF", "module": 10}, {"phase": 6, "name": "RemoveDNSInfo", "module": 10}, {"phase": 6, "name": "RemoveHostname", "module": 10}, {"phase": 6, "name": "SetHostname", "module": 10}, {"phase": 0, "name": "AddManifestPackages", "module": 11}, {"phase": 5, "name": "AddTaskselStandardPackages", "module": 11}, {"phase": 5, "name": 
"InstallPackages", "module": 11}, {"phase": 0, "name": "AddRequiredCommands", "module": 12}, {"phase": 2, "name": "MapPartitions", "module": 12}, {"phase": 2, "name": "PartitionVolume", "module": 12}, {"phase": 8, "name": "UnmapPartitions", "module": 12}, {"phase": 6, "name": "EnableShadowConfig", "module": 13}, {"phase": 0, "name": "AddOpenSSHPackage", "module": 14}, {"phase": 6, "name": "AddSSHKeyGeneration", "module": 14}, {"phase": 6, "name": "DisableSSHDNSLookup", "module": 14}, {"phase": 6, "name": "DisableSSHPasswordAuthentication", "module": 14}, {"phase": 7, "name": "ShredHostkeys", "module": 14}, {"phase": 1, "name": "Attach", "module": 15}, {"phase": 10, "name": "Delete", "module": 15}, {"phase": 8, "name": "Detach", "module": 15}, {"phase": 0, "name": "CreateWorkspace", "module": 16}, {"phase": 10, "name": "DeleteWorkspace", "module": 16}, {"phase": 0, "name": "AddSudoPackage", "module": 17}, {"phase": 6, "name": "AdminUserCredentials", "module": 17}, {"phase": 6, "name": "CreateAdminUser", "module": 17}, {"phase": 6, "name": "DisableRootLogin", "module": 17}, {"phase": 6, "name": "PasswordlessSudo", "module": 17}, {"phase": 7, "name": "RemoveAptProxy", "module": 18}, {"phase": 5, "name": "SetAptProxy", "module": 18}, {"phase": 0, "name": "AddPackages", "module": 19}, {"phase": 0, "name": "CheckAssetsPath", "module": 19}, {"phase": 6, "name": "CopyChefAssets", "module": 19}, {"phase": 0, "name": "AddBackports", "module": 20}, {"phase": 0, "name": "AddCloudInitPackages", "module": 20}, {"phase": 6, "name": "DisableModules", "module": 20}, {"phase": 5, "name": "SetMetadataSource", "module": 20}, {"phase": 6, "name": "SetUsername", "module": 20}, {"phase": 6, "name": "ImageExecuteCommand", "module": 21}, {"phase": 4, "name": "AddFolderMounts", "module": 22}, {"phase": 0, "name": "AddRequiredCommands", "module": 22}, {"phase": 7, "name": "RemoveFolderMounts", "module": 22}, {"phase": 8, "name": "ShrinkVolume", "module": 22}, {"phase": 8, "name": "Zerofree", "module": 22}, {"phase": 5, "name": "AddNtpPackage", "module": 23}, {"phase": 6, "name": "SetNtpServers", "module": 23}, {"phase": 0, "name": "AddBackports", "module": 24}, {"phase": 0, "name": "AddONEContextPackage", "module": 24}, {"phase": 5, "name": "CopyImage", "module": 25}, {"phase": 1, "name": "CreateFromImage", "module": 25}, {"phase": 1, "name": "CreateFromSnapshot", "module": 25}, {"phase": 5, "name": "Snapshot", "module": 25}, {"phase": 0, "name": "AddPackages", "module": 26}, {"phase": 6, "name": "ApplyPuppetManifest", "module": 26}, {"phase": 0, "name": "CheckAssetsPath", "module": 26}, {"phase": 0, "name": "CheckManifestPath", "module": 26}, {"phase": 6, "name": "CopyPuppetAssets", "module": 26}, {"phase": 6, "name": "EnableAgent", "module": 26}, {"phase": 6, "name": "SetRootPassword", "module": 27}, {"phase": 5, "name": "BootstrapSaltMinion", "module": 28}, {"phase": 5, "name": "InstallSaltDependencies", "module": 28}, {"phase": 6, "name": "SetSaltGrains", "module": 28}, {"phase": 0, "name": "AddUnattendedUpgradesPackage", "module": 29}, {"phase": 6, "name": "EnablePeriodicUpgrades", "module": 29}, {"phase": 6, "name": "AddInsecurePublicKey", "module": 30}, {"phase": 0, "name": "AddPackages", "module": 30}, {"phase": 0, "name": "CheckBoxPath", "module": 30}, {"phase": 0, "name": "CreateVagrantBoxDir", "module": 30}, {"phase": 6, "name": "CreateVagrantUser", "module": 30}, {"phase": 9, "name": "PackageBox", "module": 30}, {"phase": 6, "name": "PasswordlessSudo", "module": 30}, {"phase": 10, "name": 
"RemoveVagrantBoxDir", "module": 30}, {"phase": 6, "name": "SetRootPassword", "module": 30}, {"phase": 6, "name": "ConfigureGrub", "module": 31}, {"phase": 9, "name": "ConvertToVhd", "module": 32}, {"phase": 0, "name": "DefaultPackages", "module": 33}, {"phase": 5, "name": "Waagent", "module": 33}, {"phase": 0, "name": "AMIName", "module": 34}, {"phase": 9, "name": "BundleImage", "module": 34}, {"phase": 9, "name": "RegisterAMI", "module": 34}, {"phase": 10, "name": "RemoveBundle", "module": 34}, {"phase": 9, "name": "UploadImage", "module": 34}, {"phase": 6, "name": "ConfigurePVGrub", "module": 35}, {"phase": 0, "name": "Connect", "module": 36}, {"phase": 0, "name": "GetCredentials", "module": 36}, {"phase": 1, "name": "Attach", "module": 37}, {"phase": 1, "name": "Create", "module": 37}, {"phase": 9, "name": "Snapshot", "module": 37}, {"phase": 6, "name": "S3FStab", "module": 38}, {"phase": 0, "name": "AddExternalCommands", "module": 39}, {"phase": 0, "name": "GetInstanceMetadata", "module": 39}, {"phase": 0, "name": "SetRegion", "module": 39}, {"phase": 6, "name": "AddEC2InitScripts", "module": 40}, {"phase": 0, "name": "AddBuildEssentialPackage", "module": 41}, {"phase": 6, "name": "EnableDHCPCDDNS", "module": 41}, {"phase": 5, "name": "InstallEnhancedNetworking", "module": 41}, {"phase": 0, "name": "DefaultPackages", "module": 42}, {"phase": 7, "name": "CleanGoogleRepositoriesAndKeys", "module": 43}, {"phase": 5, "name": "ImportGoogleKey", "module": 43}, {"phase": 0, "name": "SetPackageRepositories", "module": 43}, {"phase": 6, "name": "ConfigureGrub", "module": 44}, {"phase": 6, "name": "GatherReleaseInformation", "module": 45}, {"phase": 6, "name": "DisableIPv6", "module": 46}, {"phase": 6, "name": "SetHostname", "module": 46}, {"phase": 9, "name": "CreateTarball", "module": 47}, {"phase": 9, "name": "RegisterImage", "module": 47}, {"phase": 9, "name": "UploadImage", "module": 47}, {"phase": 0, "name": "DefaultPackages", "module": 48}, {"phase": 0, "name": "GooglePackages", "module": 48}, {"phase": 5, "name": "InstallGSUtil", "module": 48}, {"phase": 0, "name": "DefaultPackages", "module": 49}, {"phase": 6, "name": "VirtIO", "module": 50}, {"phase": 5, "name": "AddGuestAdditionsPackages", "module": 51}, {"phase": 0, "name": "CheckGuestAdditionsPath", "module": 51}, {"phase": 5, "name": "InstallGuestAdditions", "module": 51}, {"phase": 0, "name": "DefaultPackages", "module": 52}], "links": [{"source": 19, "target": 39, "definer": 19}, {"source": 21, "target": 20, "definer": 21}, {"source": 22, "target": 20, "definer": 22}, {"source": 27, "target": 39, "definer": 27}, {"source": 38, "target": 69, "definer": 38}, {"source": 40, "target": 42, "definer": 40}, {"source": 43, "target": 42, "definer": 43}, {"source": 47, "target": 39, "definer": 47}, {"source": 48, "target": 67, "definer": 48}, {"source": 55, "target": 56, "definer": 55}, {"source": 57, "target": 39, "definer": 57}, {"source": 58, "target": 33, "definer": 58}, {"source": 60, "target": 69, "definer": 60}, {"source": 63, "target": 42, "definer": 63}, {"source": 68, "target": 71, "definer": 68}, {"source": 78, "target": 4, "definer": 78}, {"source": 85, "target": 4, "definer": 85}, {"source": 89, "target": 39, "definer": 89}, {"source": 90, "target": 3, "definer": 90}, {"source": 92, "target": 60, "definer": 92}, {"source": 92, "target": 69, "definer": 92}, {"source": 93, "target": 56, "definer": 93}, {"source": 98, "target": 67, "definer": 98}, {"source": 99, "target": 134, "definer": 99}, {"source": 102, "target": 52, 
"definer": 102}, {"source": 102, "target": 51, "definer": 102}, {"source": 120, "target": 71, "definer": 120}, {"source": 122, "target": 18, "definer": 122}, {"source": 129, "target": 71, "definer": 129}, {"source": 138, "target": 39, "definer": 138}, {"source": 141, "target": 42, "definer": 141}, {"source": 146, "target": 3, "definer": 146}, {"source": 147, "target": 11, "definer": 147}, {"source": 148, "target": 2, "definer": 148}, {"source": 149, "target": 18, "definer": 149}, {"source": 161, "target": 56, "definer": 161}, {"source": 2, "target": 0, "definer": 0}, {"source": 44, "target": 4, "definer": 4}, {"source": 11, "target": 4, "definer": 4}, {"source": 4, "target": 5, "definer": 5}, {"source": 6, "target": 5, "definer": 5}, {"source": 11, "target": 10, "definer": 10}, {"source": 8, "target": 11, "definer": 11}, {"source": 0, "target": 12, "definer": 12}, {"source": 0, "target": 13, "definer": 13}, {"source": 32, "target": 15, "definer": 15}, {"source": 32, "target": 17, "definer": 17}, {"source": 32, "target": 18, "definer": 18}, {"source": 23, "target": 20, "definer": 20}, {"source": 0, "target": 28, "definer": 28}, {"source": 35, "target": 29, "definer": 29}, {"source": 38, "target": 31, "definer": 31}, {"source": 29, "target": 34, "definer": 34}, {"source": 30, "target": 35, "definer": 35}, {"source": 20, "target": 36, "definer": 36}, {"source": 33, "target": 37, "definer": 37}, {"source": 42, "target": 41, "definer": 41}, {"source": 0, "target": 54, "definer": 54}, {"source": 4, "target": 55, "definer": 55}, {"source": 5, "target": 56, "definer": 56}, {"source": 59, "target": 58, "definer": 58}, {"source": 38, "target": 60, "definer": 60}, {"source": 0, "target": 62, "definer": 62}, {"source": 0, "target": 72, "definer": 72}, {"source": 42, "target": 73, "definer": 73}, {"source": 0, "target": 79, "definer": 79}, {"source": 0, "target": 83, "definer": 83}, {"source": 82, "target": 83, "definer": 83}, {"source": 44, "target": 85, "definer": 85}, {"source": 20, "target": 88, "definer": 88}, {"source": 69, "target": 91, "definer": 91}, {"source": 38, "target": 92, "definer": 92}, {"source": 0, "target": 96, "definer": 96}, {"source": 95, "target": 96, "definer": 96}, {"source": 56, "target": 97, "definer": 97}, {"source": 163, "target": 97, "definer": 97}, {"source": 56, "target": 100, "definer": 100}, {"source": 163, "target": 100, "definer": 100}, {"source": 0, "target": 101, "definer": 101}, {"source": 105, "target": 102, "definer": 102}, {"source": 56, "target": 108, "definer": 108}, {"source": 0, "target": 109, "definer": 109}, {"source": 0, "target": 111, "definer": 111}, {"source": 117, "target": 113, "definer": 113}, {"source": 0, "target": 114, "definer": 114}, {"source": 70, "target": 116, "definer": 116}, {"source": 115, "target": 116, "definer": 116}, {"source": 0, "target": 124, "definer": 124}, {"source": 56, "target": 125, "definer": 125}, {"source": 132, "target": 126, "definer": 126}, {"source": 136, "target": 128, "definer": 128}, {"source": 130, "target": 128, "definer": 128}, {"source": 127, "target": 130, "definer": 130}, {"source": 133, "target": 132, "definer": 132}, {"source": 139, "target": 132, "definer": 132}, {"source": 140, "target": 132, "definer": 132}, {"source": 135, "target": 134, "definer": 134}, {"source": 0, "target": 142, "definer": 142}, {"source": 0, "target": 145, "definer": 145}, {"source": 8, "target": 147, "definer": 147}, {"source": 50, "target": 151, "definer": 151}, {"source": 49, "target": 153, "definer": 153}, {"source": 155, 
"target": 154, "definer": 154}, {"source": 153, "target": 155, "definer": 155}, {"source": 0, "target": 156, "definer": 156}, {"source": 156, "target": 157, "definer": 157}, {"source": 0, "target": 159, "definer": 159}, {"source": 56, "target": 163, "definer": 163}, {"source": 0, "target": 164, "definer": 164}]} \ No newline at end of file diff --git a/docs/_static/taskoverview.coffee b/docs/_static/taskoverview.coffee index d9d3b94..f4e1f25 100644 --- a/docs/_static/taskoverview.coffee +++ b/docs/_static/taskoverview.coffee @@ -1,10 +1,10 @@ class window.TaskOverview viewBoxHeight = 800 - viewBoxWidth = 200 + viewBoxWidth = 800 margins = - top: 100 + top: 200 left: 50 - bottom: 100 + bottom: 200 right: 50 gravity = lateral: .1 @@ -26,7 +26,7 @@ class window.TaskOverview constructor: ({@selector}) -> @svg = d3.select(@selector) .attr('viewBox', "0 0 #{viewBoxWidth} #{viewBoxHeight}") - d3.json '_static/graph.json', @buildGraph + d3.json '../_static/graph.json', @buildGraph buildGraph: (error, @data) => @createDefinitions() @@ -118,15 +118,23 @@ class window.TaskOverview .selectAll('line').data(layout.links()).enter() .append('line').attr('marker-end', 'url(#right-arrowhead)') + mouseOver = (d) -> + labels.classed 'hover', (l) -> d is l + nodes.classed 'highlight', (n) -> d.module is n.module + + mouseOut = (d) -> + labels.classed 'hover', no + nodes.classed 'highlight', no + nodes = @svg.append('g').attr('class', 'nodes') .selectAll('g.partition').data(groups).enter() .append('g').attr('class', 'partition') .selectAll('circle').data((d) -> d.values).enter() .append('circle').attr('r', (d) -> d.radius) - .style('fill', (d, i) -> nodeColors(d[nodeColorKey])) + .style('fill', (d) -> nodeColors(d[nodeColorKey])) .call(layout.drag) - .on('mouseover', (d) -> (labels.filter (l) -> d is l).classed 'hover', true) - .on('mouseout', (d) -> (labels.filter (l) -> d is l).classed 'hover', false) + .on('mouseover', mouseOver) + .on('mouseout', mouseOut) labels = @svg.append('g').attr('class', 'node-labels') .selectAll('g.partition').data(groups).enter() diff --git a/docs/_static/taskoverview.less b/docs/_static/taskoverview.less index 2903816..8c1e67c 100644 --- a/docs/_static/taskoverview.less +++ b/docs/_static/taskoverview.less @@ -6,6 +6,11 @@ } g.nodes circle { stroke: #000000; + &.highlight { + stroke: #555599; + stroke-width: 2.5px; + fill: #EEAAAA !important; + } opacity: .9; stroke-width: 1.5px; } diff --git a/docs/base/fs.rst b/docs/api/base/fs.rst similarity index 100% rename from docs/base/fs.rst rename to docs/api/base/fs.rst diff --git a/docs/base/index.rst b/docs/api/base/index.rst similarity index 98% rename from docs/base/index.rst rename to docs/api/base/index.rst index d80e702..c17d336 100644 --- a/docs/base/index.rst +++ b/docs/api/base/index.rst @@ -7,9 +7,9 @@ and handles the gather, sorting and running of tasks. .. toctree:: :maxdepth: 2 + :glob: - fs - pkg + * Bootstrap information --------------------- diff --git a/docs/base/pkg.rst b/docs/api/base/pkg.rst similarity index 96% rename from docs/base/pkg.rst rename to docs/api/base/pkg.rst index 1a8db8b..4625358 100644 --- a/docs/base/pkg.rst +++ b/docs/api/base/pkg.rst @@ -16,7 +16,7 @@ Sources list :private-members: Preferences list ------------- +---------------- .. 
automodule:: bootstrapvz.base.pkg.preferenceslist :members: :private-members: diff --git a/docs/common/fs.rst b/docs/api/common/fs.rst similarity index 100% rename from docs/common/fs.rst rename to docs/api/common/fs.rst diff --git a/docs/common/index.rst b/docs/api/common/index.rst similarity index 100% rename from docs/common/index.rst rename to docs/api/common/index.rst diff --git a/docs/common/tasks/index.rst b/docs/api/common/tasks/index.rst similarity index 100% rename from docs/common/tasks/index.rst rename to docs/api/common/tasks/index.rst diff --git a/docs/api/index.rst b/docs/api/index.rst new file mode 100644 index 0000000..f00ef99 --- /dev/null +++ b/docs/api/index.rst @@ -0,0 +1,10 @@ +API +=== + + +.. toctree:: + :maxdepth: 1 + :hidden: + + base/index + common/index diff --git a/docs/changelog.rst b/docs/changelog.rst new file mode 100644 index 0000000..565b052 --- /dev/null +++ b/docs/changelog.rst @@ -0,0 +1 @@ +.. include:: ../CHANGELOG.rst diff --git a/docs/conf.py b/docs/conf.py index 6eb7717..a53c41e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -30,7 +30,8 @@ sys.path.insert(0, os.path.abspath(os.pardir)) # ones. extensions = ['sphinx.ext.coverage', 'sphinx.ext.autodoc', - 'sphinx.ext.viewcode', + 'sphinx.ext.linkcode', + 'docs.transform_github_links', ] # Add any paths that contain templates here, relative to this directory. @@ -261,3 +262,88 @@ texinfo_documents = [('index', 'bootstrap-vz', u'bootstrap-vz Documentation', # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False + + +# -- Link to rst files scattered throughout the project ------------------- + +import glob +import os.path + +for readme_path in glob.glob('../bootstrapvz/providers/*/README.rst'): + provider_name = os.path.basename(os.path.dirname(readme_path)) + include_path = os.path.join('providers', provider_name + '.rst') + if not os.path.exists(include_path): + path_to_readme = os.path.join('../../bootstrapvz/providers', provider_name, 'README.rst') + with open(include_path, 'w') as include: + include.write('.. include:: ' + path_to_readme) + + +for readme_path in glob.glob('../bootstrapvz/plugins/*/README.rst'): + plugin_name = os.path.basename(os.path.dirname(readme_path)) + include_path = os.path.join('plugins', plugin_name + '.rst') + if not os.path.exists(include_path): + path_to_readme = os.path.join('../../bootstrapvz/plugins', plugin_name, 'README.rst') + with open(include_path, 'w') as include: + include.write('.. 
include:: ' + path_to_readme) + + +# -- Create task overview graph data -------------------------------------- + +from docs import taskoverview + +data = taskoverview.generate_graph_data() +taskoverview.write_data(data, '_static/graph.json') + + +# -- Substitute links for github with relative links in readthedocs ------- + + +if on_rtd: + pass + +# Snatched from here: +# https://sourcegraph.com/github.com/Gallopsled/pwntools@master/.PipPackage/pwntools/.def/docs/source/conf/linkcode_resolve/lines +baseurl = 'https://github.com/andsens/bootstrap-vz' + +import subprocess +try: + git_head = subprocess.check_output('git describe --tags 2>/dev/null', shell=True) +except subprocess.CalledProcessError: + try: + git_head = subprocess.check_output('git rev-parse HEAD', shell=True).strip()[:10] + except subprocess.CalledProcessError: + pass + + +def linkcode_resolve(domain, info): + if domain != 'py': + return None + if not info['module']: + return None + + filepath = info['module'].replace('.', '/') + '.py' + fmt_args = {'baseurl': baseurl, + 'commit': git_head, + 'path': filepath} + + import importlib + import inspect + import types + module = importlib.import_module(info['module']) + value = module + for part in info['fullname'].split('.'): + value = getattr(value, part, None) + if value is None: + break + valid_types = (types.ModuleType, types.ClassType, types.MethodType, + types.FunctionType, types.TracebackType, + types.FrameType, types.CodeType) + if isinstance(value, valid_types): + try: + lines, first = inspect.getsourcelines(value) + fmt_args['linestart'] = first + fmt_args['lineend'] = first + len(lines) - 1 + return '{baseurl}/blob/{commit}/{path}#L{linestart}-L{lineend}'.format(**fmt_args) + except IOError: + pass + return '{baseurl}/blob/{commit}/{path}'.format(**fmt_args) diff --git a/docs/developers/contributing.rst b/docs/developers/contributing.rst new file mode 100644 index 0000000..ac7b6bc --- /dev/null +++ b/docs/developers/contributing.rst @@ -0,0 +1 @@ +.. include:: ../../CONTRIBUTING.rst diff --git a/docs/developers/documentation.rst b/docs/developers/documentation.rst new file mode 100644 index 0000000..72a3355 --- /dev/null +++ b/docs/developers/documentation.rst @@ -0,0 +1 @@ +.. include:: ../README.rst diff --git a/docs/developers/index.rst b/docs/developers/index.rst new file mode 100644 index 0000000..fa243d4 --- /dev/null +++ b/docs/developers/index.rst @@ -0,0 +1,13 @@ +Developers +========== + +.. toctree:: + :maxdepth: 1 + :hidden: + + contributing + documentation + switches + taskoverview + +.. include:: ../../bootstrapvz/README.rst diff --git a/docs/switches.rst b/docs/developers/switches.rst similarity index 100% rename from docs/switches.rst rename to docs/developers/switches.rst diff --git a/docs/taskoverview.rst b/docs/developers/taskoverview.rst similarity index 76% rename from docs/taskoverview.rst rename to docs/developers/taskoverview.rst index e84680e..2e76e19 100644 --- a/docs/taskoverview.rst +++ b/docs/developers/taskoverview.rst @@ -5,11 +5,11 @@ Taskoverview .. raw:: html - + - + diff --git a/docs/guidelines.rst b/docs/guidelines.rst deleted file mode 100644 index 0d4f81d..0000000 --- a/docs/guidelines.rst +++ /dev/null @@ -1,111 +0,0 @@ - -Development guidelines -====================== -The following guidelines should serve as general advice when -developing providers or plugins for bootstrap-vz. Keep in mind that -these guidelines are not rules , they are advice on how to better add -value to the bootstrap-vz codebase. 
- - -+ **The manifest should always fully describe the resulting image. The - outcome of a bootstrapping process should never depend on settings - specified elsewhere.** - - This allows others to easily reproduce any - setup other people are running and makes it possible to share - manifests. `The official debian EC2 images `_ - for example can be reproduced using the manifests available - in the manifest directory of bootstrap-vz. - -+ **The bootstrapper should always be able to run fully unattended.** - - For end users, this guideline minimizes the risk of errors. Any - required input would also be in direct conflict with the previous - guideline that the manifest should always fully describe the resulting - image. - - Additionally developers may have to run the bootstrap - process multiple times though, any prompts in the middle of that - process may significantly slow down the development speed. - -+ **The bootstrapper should only need as much setup as the manifest - requires.** - - Having to shuffle specific paths on the host into place - (e.g. ``/target`` has to be created manually) to get the bootstrapper - running is going to increase the rate of errors made by users. - Aim for minimal setup. - - Exceptions are of course things such as the path to - the VirtualBox Guest Additions ISO or tools like ``parted`` that - need to be installed on the host. - -+ **Roll complexity into which tasks are added to the tasklist.** - - If a ``run()`` function checks whether it should do any work or simply be - skipped, consider doing that check in ``resolve_tasks()`` instead and - avoid adding that task alltogether. This allows people looking at the - tasklist in the logfile to determine what work has been performed. If - a task says it will modify a file but then bails , a developer may get - confused when looking at that file after bootstrapping. He could - conclude that the file has either been overwritten or that the - search & replace does not work correctly. - -+ **Control flow should be directed from the task graph.** - - Avoid creating complicated ``run()`` functions. If necessary, split up - a function into two semantically separate tasks. - - This allows other tasks to interleave with the control-flow and add extended - functionality (e.g. because volume creation and mounting are two - separate tasks, `the prebootstrapped plugin - `_ - can replace the volume creation task with a task of its own that - creates a volume from a snapshot instead, but still reuse the mount task). - -+ **Task classes should be treated as decorated run() functions, they - should not have any state** - - Thats what the BootstrapInformation object is for. - -+ **Only add stuff to the BootstrapInformation object when really necessary.** - - This is mainly to avoid clutter. - -+ **Use a json-schema to check for allowed settings** - The json-schema may be verbose but it keeps the bulk of check work outside the - python code, which is a big plus when it comes to readability. This of - course only applies bas long as the checks are simple. You can of - course fall back to doing the check in python when that solution is - considerably less complex. - -+ **When invoking external programs, use long options whenever possible** - - This makes the commands a lot easier to understand, since - the option names usually hint at what they do. - -+ **When invoking external programs, don't use full paths, rely on ``$PATH``** - - This increases robustness when executable locations change. 
- Example: Use ``log_call(['wget', ...])`` instead of ``log_call(['/usr/bin/wget', ...])``. - - -Coding style ------------- -bootstrap-vz is coded to comply closely with the PEP8 style -guidelines. There however a few exceptions: - -+ Max line length is 110 chars, not 80. -+ Multiple assignments may be aligned with spaces so that the = match - vertically. -+ Ignore ``E101``: Indent with tabs and align with spaces -+ Ignore ``E221 & E241``: Alignment of assignments -+ Ignore ``E501``: The max line length is not 80 characters -+ Ignore ``W191``: Indent with tabs not spaces - -The codebase can be checked for any violations quite easily, since those rules are already specified in the -`tox `_ configuration file. -:: - - tox -e flake8 diff --git a/docs/index.rst b/docs/index.rst index 5413191..bdca053 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,30 +1,17 @@ -.. bootstrap-vz documentation master file, created by - sphinx-quickstart on Sun Mar 23 16:17:28 2014. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to bootstrap-vz's documentation! -======================================== - -Contents: - .. toctree:: - :maxdepth: 2 + :maxdepth: 1 + :hidden: - base/index - common/index - plugins/index + self + manifest providers/index - guidelines - taskoverview - howitworks - switches + plugins/index + supported_builds logging + remote_bootstrapping + changelog + developers/index + api/index + testing/index -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - +.. include:: ../README.rst diff --git a/docs/manifest.rst b/docs/manifest.rst new file mode 100644 index 0000000..d8b3cef --- /dev/null +++ b/docs/manifest.rst @@ -0,0 +1 @@ +.. include:: ../manifests/README.rst diff --git a/docs/plugins/.gitignore b/docs/plugins/.gitignore new file mode 100644 index 0000000..50db996 --- /dev/null +++ b/docs/plugins/.gitignore @@ -0,0 +1,3 @@ +* +!index.rst +!.gitignore diff --git a/docs/plugins/index.rst b/docs/plugins/index.rst index 690ccbc..1d62e4f 100644 --- a/docs/plugins/index.rst +++ b/docs/plugins/index.rst @@ -1,3 +1,11 @@ - Plugins ======= + +.. toctree:: + :maxdepth: 1 + :hidden: + :glob: + + * + +.. include:: ../../bootstrapvz/plugins/README.rst diff --git a/docs/providers/.gitignore b/docs/providers/.gitignore new file mode 100644 index 0000000..50db996 --- /dev/null +++ b/docs/providers/.gitignore @@ -0,0 +1,3 @@ +* +!index.rst +!.gitignore diff --git a/docs/providers/index.rst b/docs/providers/index.rst index ef70c42..5b11a93 100644 --- a/docs/providers/index.rst +++ b/docs/providers/index.rst @@ -1,3 +1,11 @@ - Providers ========= + +.. toctree:: + :maxdepth: 1 + :hidden: + :glob: + + * + +.. include:: ../../bootstrapvz/providers/README.rst diff --git a/docs/remote_bootstrapping.rst b/docs/remote_bootstrapping.rst new file mode 100644 index 0000000..c1251fc --- /dev/null +++ b/docs/remote_bootstrapping.rst @@ -0,0 +1 @@ +.. include:: ../bootstrapvz/remote/README.rst diff --git a/docs/supported_builds.rst b/docs/supported_builds.rst new file mode 100644 index 0000000..3b03ba7 --- /dev/null +++ b/docs/supported_builds.rst @@ -0,0 +1,73 @@ +Supported builds +================ + +The following is a list of supported manifest combinations. + +Note that grub cannot boot from unpartitioned volumes. + +Additionally grub installation is not supported on *squeeze*. +This is not a technical limitation, but simply stems from a +lack of motivation to implement support for it. 
+ +Azure +----- + +TODO + + +EC2 +--- + +EBS (wheezy & jessie) +~~~~~~~~~~~~~~~~~~~~~ + +========================== ================= ================= ================= + Bootloader / Partitioning none msdos gpt +========================== ================= ================= ================= + pvgrub (paravirtualized) supported supported supported + extlinux (hvm) supported supported supported + grub (hvm) *not supported* supported supported +========================== ================= ================= ================= + +EBS (squeeze) +~~~~~~~~~~~~~ + +========================== ================= ================= ================= + Bootloader / Partitioning none msdos gpt +========================== ================= ================= ================= + pvgrub (paravirtualized) supported supported supported + extlinux (hvm) supported supported supported + grub (hvm) *not supported* *not implemented* *not implemented* +========================== ================= ================= ================= + +S3 (all releases) +~~~~~~~~~~~~~~~~~ + +========================== ================= ================= ================= + Bootloader / Partitioning none msdos gpt +========================== ================= ================= ================= + pvgrub (paravirtualized) supported *not implemented* *not implemented* + extlinux (hvm) *not implemented* *not implemented* *not implemented* + grub (hvm) *not supported* *not implemented* *not implemented* +========================== ================= ================= ================= + +GCE +--- + +TODO + +KVM +--- + +TODO + + +VirtualBox +---------- + +========================== ================= ================= ================= + Bootloader / Partitioning none msdos gpt +========================== ================= ================= ================= + extlinux supported supported supported + grub *not supported* supported supported +========================== ================= ================= ================= diff --git a/taskoverview.py b/docs/taskoverview.py similarity index 90% rename from taskoverview.py rename to docs/taskoverview.py index f9811b1..97ecbb0 100755 --- a/taskoverview.py +++ b/docs/taskoverview.py @@ -1,7 +1,11 @@ #!/usr/bin/python +import sys +import os.path + +sys.path.append(os.path.join(os.path.dirname(__file__), '..')) -def main(opts): +def generate_graph_data(): from bootstrapvz.base.tasklist import get_all_tasks tasks = get_all_tasks() @@ -45,13 +49,11 @@ def main(opts): link[key] = tasks.index(link[key]) return link - data = {'phases': map(mk_phase, phases.order), + return {'phases': map(mk_phase, phases.order), 'modules': map(mk_module, modules), 'nodes': map(mk_node, tasks), 'links': map(mk_link, task_links)} - write_data(data, opts.get('--output', None)) - def write_data(data, output_path=None): import json @@ -73,4 +75,5 @@ if __name__ == '__main__' and __package__ is None: """ opts = docopt(usage) - main(opts) + data = generate_graph_data() + write_data(data, opts.get('--output', None)) diff --git a/docs/testing/index.rst b/docs/testing/index.rst new file mode 100644 index 0000000..5c0b339 --- /dev/null +++ b/docs/testing/index.rst @@ -0,0 +1,12 @@ +Testing +======= + +.. toctree:: + :maxdepth: 1 + :hidden: + + unit_tests + integration_tests + integration_test_providers + +.. 
include:: ../../tests/README.rst diff --git a/docs/testing/integration_test_providers.rst b/docs/testing/integration_test_providers.rst new file mode 100644 index 0000000..c81153e --- /dev/null +++ b/docs/testing/integration_test_providers.rst @@ -0,0 +1 @@ +.. include:: ../../tests/integration/providers/README.rst diff --git a/docs/testing/integration_tests.rst b/docs/testing/integration_tests.rst new file mode 100644 index 0000000..73d05c4 --- /dev/null +++ b/docs/testing/integration_tests.rst @@ -0,0 +1 @@ +.. include:: ../../tests/integration/README.rst diff --git a/docs/testing/unit_tests.rst b/docs/testing/unit_tests.rst new file mode 100644 index 0000000..418ca76 --- /dev/null +++ b/docs/testing/unit_tests.rst @@ -0,0 +1 @@ +.. include:: ../../tests/unit/README.rst diff --git a/docs/transform_github_links.py b/docs/transform_github_links.py new file mode 100644 index 0000000..7fc3469 --- /dev/null +++ b/docs/transform_github_links.py @@ -0,0 +1,100 @@ +import re + + +def setup(app): + app.connect('doctree-resolved', transform_github_links) + return {'version': '0.1'} + +# Maps from files in docs/ to folders/files in repo +includes_mapping = { + r'^index$': r'', + r'^(providers|plugins)/index$': r'bootstrapvz/\1/', + r'^(providers|plugins)/(?!index)([^/]+)$': r'bootstrapvz/\1/\2/', + r'^manifest$': r'manifests/', + r'^testing/index$': r'tests/', + r'^testing/(?!index)([^/]+)_tests$': r'tests/\1/', + r'^remote_bootstrapping$': r'bootstrapvz/remote/', + r'^developers/index$': r'bootstrapvz/', + r'^developers/contributing$': r'CONTRIBUTING.rst', + r'^developers/documentation$': r'docs/', + r'^changelog$': r'CHANGELOG.rst', +} + + +# Maps from links in repo to files/folders in docs/ +links_mapping = { + r'^$': r'', + r'^bootstrapvz/(providers|plugins)$': r'\1', + r'^bootstrapvz/(providers|plugins)/([^/]+)$': r'\1/\2.html', + r'^tests$': r'testing', + r'^manifests$': r'manifest.html', + r'^tests/([^/]+)$': r'testing/\1_tests.html', + r'^bootstrapvz/remote$': r'remote_bootstrapping.html', + r'^bootstrapvz$': r'developers', + r'^CONTRIBUTING\.rst$': r'developers/contributing.html', + r'^docs$': r'developers/documentation.html', + r'^CHANGELOG\.rst$': r'changelog.html', +} + +for key, val in includes_mapping.items(): + del includes_mapping[key] + includes_mapping[re.compile(key)] = val + +for key, val in links_mapping.items(): + del links_mapping[key] + links_mapping[re.compile(key)] = val + + +def find_original(path): + for key, val in includes_mapping.items(): + if re.match(key, path): + return re.sub(key, val, path) + return None + + +def find_docs_link(link): + try: + # Preserve anchor when doing lookups + link, anchor = link.split('#', 1) + anchor = '#' + anchor + except ValueError: + # No anchor, keep the original link + anchor = '' + for key, val in links_mapping.items(): + if re.match(key, link): + return re.sub(key, val, link) + anchor + return None + + +def transform_github_links(app, doctree, fromdocname): + # Convert relative links in repo into relative links in docs. + # We do this by first figuring out whether the current document + # has been included from outside docs/ and only continue if so. + # Next we take the repo path matching the current document + # (lookup through 'includes_mapping'), tack the link onto the dirname + # of that path and normalize it using os.path.normpath. + # The result is the path to a document/folder in the repo. + # We then convert this path into one that works in the documentation + # (lookup through 'links_mapping'). 
+ # If a mapping is found, we create a relative link from the current document. + + from docutils import nodes + import os.path + original_path = find_original(fromdocname) + if original_path is None: + return + + for node in doctree.traverse(nodes.reference): + if 'refuri' not in node: + continue + if node['refuri'].startswith('http'): + continue + abs_link = os.path.normpath(os.path.join(os.path.dirname(original_path), node['refuri'])) + docs_link = find_docs_link(abs_link) + if docs_link is None: + continue + # special handling for when we link inside the same document + if docs_link.startswith('#'): + node['refuri'] = docs_link + else: + node['refuri'] = os.path.relpath(docs_link, os.path.dirname(fromdocname)) diff --git a/manifests/README.rst b/manifests/README.rst new file mode 100644 index 0000000..da2c08a --- /dev/null +++ b/manifests/README.rst @@ -0,0 +1,239 @@ +Manifest +======== +The manifest file is the primary way to interact with bootstrap-vz. +Every configuration and customization of a Debian installation is specified in this file. + +The manifest format is YAML or JSON. It is nearly impossible to run the +bootstrapper with an invalid configuration, since every part of the +framework supplies a `json-schema `__ that +specifies exactly which configuration settings are valid in different +situations. + +Manifest variables +------------------ + +Many of the settings in the example manifests use strings like +``debian-{system.release}-{system.architecture}-{{"{%y"}}}{{"{%m"}}}{{"{%d"}}}``. +These strings make use of manifest variables, which can cross-reference +other settings in the manifest or specific values supplied by the +bootstrapper (e.g. all python date formatting variables are available). + +Any reference uses dots to specify a path to the desired manifest +setting. Not all settings support this, though; to see whether embedding +a manifest variable in a setting is possible, look for the +``manifest vars`` label. + +Sections +-------- + +The manifest is split into 7 sections. + +Provider +~~~~~~~~ + +The provider section contains all provider specific settings and the +name of the provider itself. + +- ``name``: target virtualization platform of the installation + ``required`` + +Consult the `providers <../bootstrapvz/providers>`__ section of the documentation +for a list of valid values. + +Bootstrapper +~~~~~~~~~~~~ + +This section concerns the bootstrapper itself and its behavior. There +are 6 possible settings: + +- ``workspace``: Path to where the bootstrapper should place images + and intermediate files. Any volumes will be mounted under that path. + ``required`` +- ``tarball``: debootstrap has the option to download all the + software and pack it up in a tarball. When starting the actual + bootstrapping process, debootstrap can then be pointed at that + tarball and use it instead of downloading anything from the internet. + If you plan on running the bootstrapper multiple times, this option + can save you a lot of bandwidth and time. This option just specifies + whether it should create a new tarball or not. It will search for and + use an available tarball if it already exists, regardless of this + setting. + ``optional`` + Valid values: ``true, false`` + Default: ``false`` +- ``mirror``: The mirror debootstrap should download software from. + It is advisable to specify a mirror close to your location (or the + location of the host you are bootstrapping on), to decrease latency + and improve bandwidth.
If not specified, `the configured aptitude + mirror URL <#packages>`__ is used. + ``optional`` +- ``include_packages``: Extra packages to be installed during + bootstrap. Accepts a list of package names. + ``optional`` +- ``exclude_packages``: Packages to exclude during bootstrap phase. + Accepts a list of package names. + ``optional`` +- ``guest_additions``: This setting is only relevant for the + `virtualbox provider <../bootstrapvz/providers/virtualbox>`__. + It specifies the path to the VirtualBox Guest Additions ISO, which, when specified, + will be mounted and used to install the VirtualBox Guest Additions. + ``optional`` + +Image +~~~~~ + +The image section configures anything pertaining directly to the image +that will be created. + +- ``name``: The name of the resulting image. + When bootstrapping cloud images, this would be the name visible in + the interface when booting up new instances. + When bootstrapping for VirtualBox or kvm, it's the filename of the + image. + ``required`` + ``manifest vars`` +- ``description``: Description of the image. Where this setting is + used depends highly on which provider is set. At the moment it is + only used for AWS images. + ``required for ec2 provider`` + ``manifest vars`` +- ``bucket``: When bootstrapping an S3 backed image for AWS, this + will be the bucket where the image is uploaded to. + ``required for S3 backing`` +- ``region``: Region in which the AMI should be registered. + ``required for S3 backing`` + +System +~~~~~~ + +This section defines anything that pertains directly to the bootstrapped +system and does not fit under any other section. + +- ``architecture``: The architecture of the system. + Valid values: ``i386, amd64`` + ``required`` +- ``bootloader``: The bootloader for the system. Depending on the + bootmethod of the virtualization platform, the options may be + restricted. + Valid values: ``grub, extlinux, pv-grub`` + ``required`` +- ``charmap``: The default charmap of the system. + Valid values: Any valid charmap like ``UTF-8``, ``ISO-8859-`` or + ``GBK``. + ``required`` +- ``hostname``: hostname to preconfigure the system with. + ``optional`` +- ``locale``: The default locale of the system. + Valid values: Any locale mentioned in ``/etc/locale.gen`` + ``required`` +- ``release``: Defines which debian release should be bootstrapped. + Valid values: ``squeeze``, ``wheezy``, ``jessie``, ``sid``, + ``oldstable``, ``stable``, ``testing``, ``unstable`` + ``required`` +- ``timezone``: Timezone of the system. + Valid values: Any filename from ``/usr/share/zoneinfo`` + ``required`` + +Packages +~~~~~~~~ + +The packages section allows you to install custom packages from a +variety of sources. + +- ``install``: A list of strings that specify which packages should + be installed. Valid values: package names optionally followed by a + ``/target`` or paths to local ``.deb`` files. +- ``install_standard``: Defines if the packages of the + ``"Standard System Utilities"`` option of the Debian installer, + provided by `tasksel `__, should be + installed or not. The problem is that with just ``debootstrap``, the + system ends up with very basic commands. This is not a problem for a + machine that will not be used interactively, but otherwise it is nice + to have at hand tools like ``bash-completion``, ``less``, ``locate``, + etc. + ``optional`` + Valid values: ``true``, ``false`` + Default: ``false`` +- ``mirror``: The default aptitude mirror. 
+ ``optional`` + Default: ``http://http.debian.net/debian`` +- ``sources``: A map of additional sources that should be added to + the aptitude sources list. The key becomes the filename in + ``/etc/apt/sources.list.d/`` (with ``.list`` appended to it), while + the value is an array with each entry being a line. + ``optional`` +- ``components``: A list of components that should be added to the + default apt sources. For example ``contrib`` or ``non-free``. + ``optional`` + Default: ``['main']`` +- ``trusted-keys``: List of paths to ``.gpg`` keyrings that should + be added to the aptitude keyring of trusted signatures for + repositories. + ``optional`` +- ``preferences``: Allows you to pin packages through `apt + preferences `__. The setting + is an object where the key is the preference filename in + ``/etc/apt/preferences.d/``. The key ``main`` is special and refers + to the file ``/etc/apt/preferences``, which will be overwritten if + specified. + ``optional`` + The values are objects with three keys: +- ``package``: The package to pin (wildcards allowed). +- ``pin``: The release to pin the package to. +- ``pin-priority``: The priority of this pin. + +Volume +~~~~~~ + +bootstrap-vz allows a wide range of options for configuring the disk +layout of the system. It can create unpartitioned as well as partitioned +volumes using either the gpt or msdos scheme. At most, only +three partitions with predefined roles are configurable. They are +boot, root and swap. + +- ``backing``: Specifies the volume backing. This setting is very + provider specific. + Valid values: ``ebs``, ``s3``, ``vmdk``, ``vdi``, ``raw`` + ``required`` +- ``partitions``: A map of the partitions that should be created on + the volume. +- ``type``: The partitioning scheme to use. When using ``none``, + only root can be specified as a partition. + Valid values: ``none``, ``gpt``, ``msdos`` + ``required`` +- ``root``: Configuration of the root partition. ``required`` + + - ``size``: The size of the partition. Valid values: Any + datasize specification up to TB (e.g. 5KiB, 1MB, 6TB). + ``required`` + - ``filesystem``: The filesystem of the partition. When choosing + ``xfs``, the ``xfsprogs`` package will need to be installed. + Valid values: ``ext2``, ``ext3``, ``ext4``, ``xfs`` + ``required`` + - ``format_command``: Command to format the partition with. This + optional setting overrides the command bootstrap-vz would normally + use to format the partition. The command is specified as a string + array where each option/argument is an item in that array (much + like the `commands <../bootstrapvz/plugins/commands>`__ plugin). + ``optional`` The following variables are available: + - ``{fs}``: The filesystem of the partition. + - ``{device_path}``: The device path of the partition. + - ``{size}``: The size of the partition. + + The default command used by bootstrap-vz is + ``['mkfs.{fs}', '{device_path}']``. + + - ``boot``: Configuration of the boot partition. The three + settings equal those of the root partition. + ``optional`` + - ``swap``: Configuration of the swap partition. Since the swap + partition has its own filesystem, you can only specify the size for + this partition. + ``optional`` + +Plugins +~~~~~~~ + +The plugins section is a map of plugin names to whatever configuration a +plugin requires. Go to the `plugin section <../bootstrapvz/plugins>`__ +of the documentation to see the configuration for a specific plugin.
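To make the manifest-variable mechanism described in the README above more concrete, the following is a minimal sketch of how a dotted reference such as ``{system.release}`` or a date code such as ``{%y}`` could be resolved against a manifest. The helper name ``resolve_manifest_vars`` and its substitution strategy are assumptions for illustration only and do not reproduce bootstrap-vz's actual implementation::

    import re
    from datetime import datetime

    def resolve_manifest_vars(template, manifest):
        # Hypothetical helper (not bootstrap-vz code): replaces {dotted.path}
        # references with manifest values and {%x} tokens with strftime output.
        def lookup(match):
            token = match.group(1)
            if token.startswith('%'):
                # date formatting variables, e.g. {%y} -> two-digit year
                return datetime.now().strftime(token)
            value = manifest
            for part in token.split('.'):
                # walk the dotted path into the nested manifest dict
                value = value[part]
            return str(value)
        return re.sub(r'\{([^{}]+)\}', lookup, template)

    manifest = {'system': {'release': 'wheezy', 'architecture': 'amd64'}}
    print(resolve_manifest_vars(
        'debian-{system.release}-{system.architecture}-{%y}{%m}{%d}', manifest))
    # prints something like: debian-wheezy-amd64-150502

The example manifests that follow (azure, ec2, gce, kvm, virtualbox) show the resulting image names produced by exactly this kind of substitution.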
diff --git a/manifests/azure.manifest.json b/manifests/azure.manifest.json deleted file mode 100644 index 4007691..0000000 --- a/manifests/azure.manifest.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "provider": "azure", - "bootstrapper": { - "workspace": "/target", - "mirror": "http://ftp.fr.debian.org/debian/" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{%y}{%m}{%d}", - "description": "Debian {system.release} {system.architecture}" - }, - "system": { - "release": "wheezy", - "architecture": "amd64", - "bootloader": "grub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8", - "waagent": { - "version": "2.0.4" - } - }, - "packages": { - }, - "volume": { - "backing": "raw", - "partitions": { - "type": "msdos", - "boot": { - "size": "32MiB", - "filesystem": "ext2" - }, - "root": { - "size": "7GiB", - "filesystem": "ext4" - } - } - }, - "plugins": { - "ntp": { - "servers": ["time.windows.com"] - } - } -} diff --git a/manifests/azure.manifest.yml b/manifests/azure.manifest.yml new file mode 100644 index 0000000..a6cd751 --- /dev/null +++ b/manifests/azure.manifest.yml @@ -0,0 +1,33 @@ +--- +provider: + name: azure + waagent: + version: 2.0.4 +bootstrapper: + mirror: http://ftp.fr.debian.org/debian/ + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{%y}{%m}{%d} + description: Debian {system.release} {system.architecture} +system: + release: wheezy + architecture: amd64 + bootloader: grub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: raw + partitions: + type: msdos + boot: + filesystem: ext2 + size: 32MiB + root: + filesystem: ext4 + size: 7GiB +packages: {} +plugins: + ntp: + servers: + - time.windows.com diff --git a/manifests/ec2-ebs-debian-official-amd64-hvm-cn-north-1.manifest.json b/manifests/ec2-ebs-debian-official-amd64-hvm-cn-north-1.manifest.json deleted file mode 100644 index 622879e..0000000 --- a/manifests/ec2-ebs-debian-official-amd64-hvm-cn-north-1.manifest.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "provider": "ec2", - "virtualization": "hvm", - "credentials": { - // "access-key": null, - // "secret-key": null - }, - - "bootstrapper": { - "workspace": "/target" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{virtualization}-{%Y}-{%m}-{%d}-ebs", - "description": "Debian {system.release} {system.architecture}" - }, - "system": { - "release": "wheezy", - "architecture": "amd64", - "bootloader": "extlinux", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": { - "mirror": "http://ftp.cn.debian.org/debian" - }, - "volume": { - "backing": "ebs", - "partitions": { - "type": "none", - "root": { - "size": "8GiB", - "filesystem": "ext4" - } - } - }, - "plugins": { - "cloud_init": { - "username": "admin", - "metadata_sources": "Ec2" - } - } -} diff --git a/manifests/ec2-ebs-debian-official-amd64-hvm-cn-north-1.manifest.yml b/manifests/ec2-ebs-debian-official-amd64-hvm-cn-north-1.manifest.yml new file mode 100644 index 0000000..5fa8688 --- /dev/null +++ b/manifests/ec2-ebs-debian-official-amd64-hvm-cn-north-1.manifest.yml @@ -0,0 +1,33 @@ +--- +provider: + name: ec2 + virtualization: hvm + enhanced_networking: simple + # credentials: + # access-key: AFAKEACCESSKEYFORAWS + # secret-key: thes3cr3tkeyf0ryourawsaccount/FS4d8Qdva +bootstrapper: + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{provider.virtualization}-{%Y}-{%m}-{%d}-ebs + description: Debian {system.release} {system.architecture} +system: + 
release: wheezy + architecture: amd64 + bootloader: extlinux + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: ebs + partitions: + type: none + root: + filesystem: ext4 + size: 8GiB +packages: + mirror: http://ftp.cn.debian.org/debian +plugins: + cloud_init: + metadata_sources: Ec2 + username: admin diff --git a/manifests/ec2-ebs-debian-official-amd64-hvm.manifest.json b/manifests/ec2-ebs-debian-official-amd64-hvm.manifest.json deleted file mode 100644 index 651d211..0000000 --- a/manifests/ec2-ebs-debian-official-amd64-hvm.manifest.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "provider": "ec2", - "virtualization": "hvm", - "credentials": { - // "access-key": null, - // "secret-key": null - }, - - "bootstrapper": { - "workspace": "/target" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{virtualization}-{%Y}-{%m}-{%d}-ebs", - "description": "Debian {system.release} {system.architecture}" - }, - "system": { - "release": "wheezy", - "architecture": "amd64", - "bootloader": "extlinux", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": { - "mirror": "http://cloudfront.debian.net/debian" - }, - "volume": { - "backing": "ebs", - "partitions": { - "type": "none", - "root": { - "size": "8GiB", - "filesystem": "ext4" - } - } - }, - "plugins": { - "cloud_init": { - "username": "admin", - "metadata_sources": "Ec2" - } - } -} diff --git a/manifests/ec2-ebs-debian-official-amd64-hvm.manifest.yml b/manifests/ec2-ebs-debian-official-amd64-hvm.manifest.yml new file mode 100644 index 0000000..7b7f23e --- /dev/null +++ b/manifests/ec2-ebs-debian-official-amd64-hvm.manifest.yml @@ -0,0 +1,33 @@ +--- +provider: + name: ec2 + virtualization: hvm + enhanced_networking: simple + # credentials: + # access-key: AFAKEACCESSKEYFORAWS + # secret-key: thes3cr3tkeyf0ryourawsaccount/FS4d8Qdva +bootstrapper: + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{provider.virtualization}-{%Y}-{%m}-{%d}-ebs + description: Debian {system.release} {system.architecture} +system: + release: wheezy + architecture: amd64 + bootloader: extlinux + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: ebs + partitions: + type: none + root: + filesystem: ext4 + size: 8GiB +packages: + mirror: http://cloudfront.debian.net/debian +plugins: + cloud_init: + metadata_sources: Ec2 + username: admin diff --git a/manifests/ec2-ebs-debian-official-amd64-pvm-cn-north-1.manifest.json b/manifests/ec2-ebs-debian-official-amd64-pvm-cn-north-1.manifest.json deleted file mode 100644 index 4c88efb..0000000 --- a/manifests/ec2-ebs-debian-official-amd64-pvm-cn-north-1.manifest.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "provider": "ec2", - "virtualization": "pvm", - "credentials": { - // "access-key": null, - // "secret-key": null - }, - - "bootstrapper": { - "workspace": "/target" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{virtualization}-{%Y}-{%m}-{%d}-ebs", - "description": "Debian {system.release} {system.architecture}" - }, - "system": { - "release": "wheezy", - "architecture": "amd64", - "bootloader": "pvgrub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": { - "mirror": "http://ftp.cn.debian.org/debian" - }, - "volume": { - "backing": "ebs", - "partitions": { - "type": "none", - "root": { - "size": "8GiB", - "filesystem": "ext4" - } - } - }, - "plugins": { - "cloud_init": { - "username": "admin", - "metadata_sources": "Ec2" - } - } -} diff --git 
a/manifests/ec2-ebs-debian-official-amd64-pvm-cn-north-1.manifest.yml b/manifests/ec2-ebs-debian-official-amd64-pvm-cn-north-1.manifest.yml new file mode 100644 index 0000000..2160e59 --- /dev/null +++ b/manifests/ec2-ebs-debian-official-amd64-pvm-cn-north-1.manifest.yml @@ -0,0 +1,32 @@ +--- +provider: + name: ec2 + virtualization: pvm + # credentials: + # access-key: AFAKEACCESSKEYFORAWS + # secret-key: thes3cr3tkeyf0ryourawsaccount/FS4d8Qdva +bootstrapper: + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{provider.virtualization}-{%Y}-{%m}-{%d}-ebs + description: Debian {system.release} {system.architecture} +system: + release: wheezy + architecture: amd64 + bootloader: pvgrub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: ebs + partitions: + type: none + root: + filesystem: ext4 + size: 8GiB +packages: + mirror: http://ftp.cn.debian.org/debian +plugins: + cloud_init: + metadata_sources: Ec2 + username: admin diff --git a/manifests/ec2-ebs-debian-official-amd64-pvm.manifest.json b/manifests/ec2-ebs-debian-official-amd64-pvm.manifest.json deleted file mode 100644 index 34ced43..0000000 --- a/manifests/ec2-ebs-debian-official-amd64-pvm.manifest.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "provider": "ec2", - "virtualization": "pvm", - "credentials": { - // "access-key": null, - // "secret-key": null - }, - - "bootstrapper": { - "workspace": "/target" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{virtualization}-{%Y}-{%m}-{%d}-ebs", - "description": "Debian {system.release} {system.architecture}" - }, - "system": { - "release": "wheezy", - "architecture": "amd64", - "bootloader": "pvgrub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": { - "mirror": "http://cloudfront.debian.net/debian" - }, - "volume": { - "backing": "ebs", - "partitions": { - "type": "none", - "root": { - "size": "8GiB", - "filesystem": "ext4" - } - } - }, - "plugins": { - "cloud_init": { - "username": "admin", - "metadata_sources": "Ec2" - } - } -} diff --git a/manifests/ec2-ebs-debian-official-amd64-pvm.manifest.yml b/manifests/ec2-ebs-debian-official-amd64-pvm.manifest.yml new file mode 100644 index 0000000..1729377 --- /dev/null +++ b/manifests/ec2-ebs-debian-official-amd64-pvm.manifest.yml @@ -0,0 +1,32 @@ +--- +provider: + name: ec2 + virtualization: pvm + # credentials: + # access-key: AFAKEACCESSKEYFORAWS + # secret-key: thes3cr3tkeyf0ryourawsaccount/FS4d8Qdva +bootstrapper: + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{provider.virtualization}-{%Y}-{%m}-{%d}-ebs + description: Debian {system.release} {system.architecture} +system: + release: wheezy + architecture: amd64 + bootloader: pvgrub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: ebs + partitions: + type: none + root: + filesystem: ext4 + size: 8GiB +packages: + mirror: http://cloudfront.debian.net/debian +plugins: + cloud_init: + metadata_sources: Ec2 + username: admin diff --git a/manifests/ec2-ebs-debian-official-i386-pvm.manifest.json b/manifests/ec2-ebs-debian-official-i386-pvm.manifest.json deleted file mode 100644 index b0a7db3..0000000 --- a/manifests/ec2-ebs-debian-official-i386-pvm.manifest.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "provider": "ec2", - "virtualization": "pvm", - "credentials": { - // "access-key": null, - // "secret-key": null - }, - - "bootstrapper": { - "workspace": "/target" - }, - "image": { - "name": 
"debian-{system.release}-{system.architecture}-{virtualization}-{%Y}-{%m}-{%d}-ebs", - "description": "Debian {system.release} {system.architecture}" - }, - "system": { - "release": "wheezy", - "architecture": "i386", - "bootloader": "pvgrub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": { - "mirror": "http://cloudfront.debian.net/debian" - }, - "volume": { - "backing": "ebs", - "partitions": { - "type": "none", - "root": { - "size": "8GiB", - "filesystem": "ext4" - } - } - }, - "plugins": { - "cloud_init": { - "username": "admin", - "metadata_sources": "Ec2" - } - } -} diff --git a/manifests/ec2-ebs-debian-official-i386-pvm.manifest.yml b/manifests/ec2-ebs-debian-official-i386-pvm.manifest.yml new file mode 100644 index 0000000..360ed6d --- /dev/null +++ b/manifests/ec2-ebs-debian-official-i386-pvm.manifest.yml @@ -0,0 +1,32 @@ +--- +provider: + name: ec2 + virtualization: pvm + # credentials: + # access-key: AFAKEACCESSKEYFORAWS + # secret-key: thes3cr3tkeyf0ryourawsaccount/FS4d8Qdva +bootstrapper: + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{provider.virtualization}-{%Y}-{%m}-{%d}-ebs + description: Debian {system.release} {system.architecture} +system: + release: wheezy + architecture: i386 + bootloader: pvgrub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: ebs + partitions: + type: none + root: + filesystem: ext4 + size: 8GiB +packages: + mirror: http://cloudfront.debian.net/debian +plugins: + cloud_init: + metadata_sources: Ec2 + username: admin diff --git a/manifests/ec2-ebs-debian-testing-amd64-pvm.manifest.json b/manifests/ec2-ebs-debian-testing-amd64-pvm.manifest.json deleted file mode 100644 index 0456892..0000000 --- a/manifests/ec2-ebs-debian-testing-amd64-pvm.manifest.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "provider": "ec2", - "virtualization": "pvm", - "credentials": { - // "access-key": null, - // "secret-key": null - }, - - "bootstrapper": { - "workspace": "/target" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{virtualization}-{%Y}-{%m}-{%d}-ebs", - "description": "Debian {system.release} {system.architecture}" - }, - "system": { - "release": "testing", - "architecture": "amd64", - "bootloader": "pvgrub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": { - "mirror": "http://cloudfront.debian.net/debian" - }, - "volume": { - "backing": "ebs", - "partitions": { - "type": "none", - "root": { - "size": "8GiB", - "filesystem": "ext4" - } - } - }, - "plugins": { - "cloud_init": { - "username": "admin", - "metadata_sources": "Ec2" - } - } -} diff --git a/manifests/ec2-ebs-debian-testing-amd64-pvm.manifest.yml b/manifests/ec2-ebs-debian-testing-amd64-pvm.manifest.yml index cfdbd73..7488073 100644 --- a/manifests/ec2-ebs-debian-testing-amd64-pvm.manifest.yml +++ b/manifests/ec2-ebs-debian-testing-amd64-pvm.manifest.yml @@ -1,36 +1,32 @@ --- -provider: "ec2" -virtualization: "pvm" -credentials: - access-key: "" - secret-key: "" +provider: + name: ec2 + virtualization: pvm + # credentials: + # access-key: AFAKEACCESSKEYFORAWS + # secret-key: thes3cr3tkeyf0ryourawsaccount/FS4d8Qdva bootstrapper: - workspace: "/target" + workspace: /target image: - name: "debian-{system.release}-{system.architecture}-{virtualization}-{%Y}-{%m}-{%d}-ebs" - description: "Debian {system.release} {system.architecture}" + name: debian-{system.release}-{system.architecture}-{provider.virtualization}-{%Y}-{%m}-{%d}-ebs + description: Debian 
{system.release} {system.architecture} system: - release: "testing" - architecture: "amd64" - bootloader: "pvgrub" - timezone: "UTC" - locale: "en_US" - charmap: "UTF-8" -packages: - #mirror: "http://cloudfront.debian.net/debian" - install_standard: true + release: testing + architecture: amd64 + bootloader: pvgrub + charmap: UTF-8 + locale: en_US + timezone: UTC volume: - backing: "ebs" - partitions: - type: "none" - root: - size: "8GiB" - filesystem: "ext4" + backing: ebs + partitions: + type: none + root: + filesystem: ext4 + size: 8GiB +packages: + mirror: http://cloudfront.debian.net/debian plugins: - cloud_init: - username: "admin" - #metadata_sources: "Ec2" - disable_modules: - - "landscape" - - "byobu" - - "ssh-import-id" + cloud_init: + metadata_sources: Ec2 + username: admin diff --git a/manifests/ec2-ebs-debian-unstable-amd64-pvm.manifest.json b/manifests/ec2-ebs-debian-unstable-amd64-pvm.manifest.json deleted file mode 100644 index 98e0f0a..0000000 --- a/manifests/ec2-ebs-debian-unstable-amd64-pvm.manifest.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "provider": "ec2", - "virtualization": "pvm", - "credentials": { - // "access-key": null, - // "secret-key": null - }, - - "bootstrapper": { - "workspace": "/target" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{virtualization}-{%Y}-{%m}-{%d}-ebs", - "description": "Debian {system.release} {system.architecture}" - }, - "system": { - "release": "unstable", - "architecture": "amd64", - "bootloader": "pvgrub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": { - "mirror": "http://cloudfront.debian.net/debian" - }, - "volume": { - "backing": "ebs", - "partitions": { - "type": "none", - "root": { - "size": "8GiB", - "filesystem": "ext4" - } - } - }, - "plugins": { - "cloud_init": { - "username": "admin", - "metadata_sources": "Ec2" - } - } -} diff --git a/manifests/ec2-ebs-debian-unstable-amd64-pvm.manifest.yml b/manifests/ec2-ebs-debian-unstable-amd64-pvm.manifest.yml new file mode 100644 index 0000000..cf417a2 --- /dev/null +++ b/manifests/ec2-ebs-debian-unstable-amd64-pvm.manifest.yml @@ -0,0 +1,32 @@ +--- +provider: + name: ec2 + virtualization: pvm + # credentials: + # access-key: AFAKEACCESSKEYFORAWS + # secret-key: thes3cr3tkeyf0ryourawsaccount/FS4d8Qdva +bootstrapper: + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{provider.virtualization}-{%Y}-{%m}-{%d}-ebs + description: Debian {system.release} {system.architecture} +system: + release: unstable + architecture: amd64 + bootloader: pvgrub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: ebs + partitions: + type: none + root: + filesystem: ext4 + size: 8GiB +packages: + mirror: http://cloudfront.debian.net/debian +plugins: + cloud_init: + metadata_sources: Ec2 + username: admin diff --git a/manifests/ec2-ebs-debian-unstable-contrib-amd64-pvm.manifest.json b/manifests/ec2-ebs-debian-unstable-contrib-amd64-pvm.manifest.json deleted file mode 100644 index eaf9584..0000000 --- a/manifests/ec2-ebs-debian-unstable-contrib-amd64-pvm.manifest.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "provider": "ec2", - "virtualization": "pvm", - "credentials": { - // "access-key": null, - // "secret-key": null - }, - - "bootstrapper": { - "workspace": "/target" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{virtualization}-{%Y}-{%m}-{%d}-ebs", - "description": "Debian {system.release} {system.architecture}" - }, - "system": { - "release": "unstable", - 
"sections": ["main", "contrib", "non-free"], - "architecture": "amd64", - "bootloader": "pvgrub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": { - "mirror": "http://cloudfront.debian.net/debian" - }, - "volume": { - "backing": "ebs", - "partitions": { - "type": "none", - "root": { - "size": "8GiB", - "filesystem": "ext4" - } - } - }, - "plugins": { - "cloud_init": { - "username": "admin", - "metadata_sources": "Ec2" - } - } -} diff --git a/manifests/ec2-ebs-debian-unstable-contrib-amd64-pvm.manifest.yml b/manifests/ec2-ebs-debian-unstable-contrib-amd64-pvm.manifest.yml new file mode 100644 index 0000000..f7f4935 --- /dev/null +++ b/manifests/ec2-ebs-debian-unstable-contrib-amd64-pvm.manifest.yml @@ -0,0 +1,36 @@ +--- +provider: + name: ec2 + virtualization: pvm + # credentials: + # access-key: AFAKEACCESSKEYFORAWS + # secret-key: thes3cr3tkeyf0ryourawsaccount/FS4d8Qdva +bootstrapper: + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{provider.virtualization}-{%Y}-{%m}-{%d}-ebs + description: Debian {system.release} {system.architecture} +system: + release: unstable + architecture: amd64 + bootloader: pvgrub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: ebs + partitions: + type: none + root: + filesystem: ext4 + size: 8GiB +packages: + mirror: http://cloudfront.debian.net/debian + components: + - main + - contrib + - non-free +plugins: + cloud_init: + metadata_sources: Ec2 + username: admin diff --git a/manifests/ec2-ebs-partitioned.manifest.json b/manifests/ec2-ebs-partitioned.manifest.json deleted file mode 100644 index 441c076..0000000 --- a/manifests/ec2-ebs-partitioned.manifest.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "provider": "ec2", - "virtualization": "pvm", - "credentials": { - // "access-key": null, - // "secret-key": null - }, - - "bootstrapper": { - "workspace": "/target" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{virtualization}-{%y}{%m}{%d}", - "description": "Debian {system.release} {system.architecture} AMI ({virtualization})" - }, - "system": { - "release": "wheezy", - "architecture": "amd64", - "bootloader": "pvgrub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": { - "mirror": "http://cloudfront.debian.net/debian" - }, - "volume": { - "backing": "ebs", - "partitions": { - "type": "msdos", - "root": { - "size": "1GiB", - "filesystem": "ext4" - } - } - } -} diff --git a/manifests/ec2-ebs-partitioned.manifest.yml b/manifests/ec2-ebs-partitioned.manifest.yml new file mode 100644 index 0000000..2cf1652 --- /dev/null +++ b/manifests/ec2-ebs-partitioned.manifest.yml @@ -0,0 +1,28 @@ +--- +provider: + name: ec2 + virtualization: pvm + # credentials: + # access-key: AFAKEACCESSKEYFORAWS + # secret-key: thes3cr3tkeyf0ryourawsaccount/FS4d8Qdva +bootstrapper: + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{provider.virtualization}-{%y}{%m}{%d} + description: Debian {system.release} {system.architecture} AMI ({provider.virtualization}) +system: + release: wheezy + architecture: amd64 + bootloader: pvgrub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: ebs + partitions: + type: msdos + root: + filesystem: ext4 + size: 1GiB +packages: + mirror: http://cloudfront.debian.net/debian diff --git a/manifests/ec2-ebs-single.manifest.json b/manifests/ec2-ebs-single.manifest.json deleted file mode 100644 index 1e7f883..0000000 --- a/manifests/ec2-ebs-single.manifest.json +++ 
/dev/null @@ -1,37 +0,0 @@ -{ - "provider": "ec2", - "virtualization": "pvm", - "credentials": { - // "access-key": null, - // "secret-key": null - }, - - "bootstrapper": { - "workspace": "/target" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{virtualization}-{%y}{%m}{%d}", - "description": "Debian {system.release} {system.architecture} AMI ({virtualization})" - }, - "system": { - "release": "wheezy", - "architecture": "amd64", - "bootloader": "pvgrub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": { - "mirror": "http://cloudfront.debian.net/debian" - }, - "volume": { - "backing": "ebs", - "partitions": { - "type": "none", - "root": { - "size": "1GiB", - "filesystem": "ext4" - } - } - } -} diff --git a/manifests/ec2-ebs-single.manifest.yml b/manifests/ec2-ebs-single.manifest.yml new file mode 100644 index 0000000..25f1b1a --- /dev/null +++ b/manifests/ec2-ebs-single.manifest.yml @@ -0,0 +1,28 @@ +--- +provider: + name: ec2 + virtualization: pvm + # credentials: + # access-key: AFAKEACCESSKEYFORAWS + # secret-key: thes3cr3tkeyf0ryourawsaccount/FS4d8Qdva +bootstrapper: + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{provider.virtualization}-{%y}{%m}{%d} + description: Debian {system.release} {system.architecture} AMI ({provider.virtualization}) +system: + release: wheezy + architecture: amd64 + bootloader: pvgrub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: ebs + partitions: + type: none + root: + filesystem: ext4 + size: 1GiB +packages: + mirror: http://cloudfront.debian.net/debian diff --git a/manifests/ec2-s3-debian-official-amd64-pvm-cn-north-1.manifest.json b/manifests/ec2-s3-debian-official-amd64-pvm-cn-north-1.manifest.json deleted file mode 100644 index 6039091..0000000 --- a/manifests/ec2-s3-debian-official-amd64-pvm-cn-north-1.manifest.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "provider": "ec2", - "virtualization": "pvm", - "credentials": { - // "access-key": null, - // "secret-key": null, - // "certificate": null, - // "private-key": null, - // "user-id": null - }, - - "bootstrapper": { - "workspace": "/target" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{virtualization}-{%Y}-{%m}-{%d}", - "description": "Debian {system.release} {system.architecture} AMI", - "bucket": "debian-amis-cn-north-1", - "region": "cn-north-1" - }, - "system": { - "release": "wheezy", - "architecture": "amd64", - "bootloader": "pvgrub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": { - "mirror": "http://ftp.cn.debian.org/debian" - }, - "volume": { - "backing": "s3", - "partitions": { - "type": "none", - "root": { - "size": "4GiB", - "filesystem": "ext4" - } - } - }, - "plugins": { - "cloud_init": { - "username": "admin", - "disable_modules": [ "landscape", "byobu", "ssh-import-id" ] - } - } -} diff --git a/manifests/ec2-s3-debian-official-amd64-pvm-cn-north-1.manifest.yml b/manifests/ec2-s3-debian-official-amd64-pvm-cn-north-1.manifest.yml new file mode 100644 index 0000000..6268883 --- /dev/null +++ b/manifests/ec2-s3-debian-official-amd64-pvm-cn-north-1.manifest.yml @@ -0,0 +1,40 @@ +--- +provider: + name: ec2 + virtualization: pvm + # credentials: + # access-key: AFAKEACCESSKEYFORAWS + # secret-key: thes3cr3tkeyf0ryourawsaccount/FS4d8Qdva + # certificate: /path/to/your/certificate.pem + # private-key: /path/to/your/private.key + # user-id: arn:aws:iam::123456789012:user/iamuser +bootstrapper: + workspace: /target 
+image: + name: debian-{system.release}-{system.architecture}-{provider.virtualization}-{%Y}-{%m}-{%d} + description: Debian {system.release} {system.architecture} AMI + bucket: debian-amis-cn-north-1 + region: cn-north-1 +system: + release: wheezy + architecture: amd64 + bootloader: pvgrub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: s3 + partitions: + type: none + root: + filesystem: ext4 + size: 4GiB +packages: + mirror: http://ftp.cn.debian.org/debian +plugins: + cloud_init: + disable_modules: + - landscape + - byobu + - ssh-import-id + username: admin diff --git a/manifests/ec2-s3.manifest.json b/manifests/ec2-s3.manifest.json deleted file mode 100644 index fcf7ca2..0000000 --- a/manifests/ec2-s3.manifest.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "provider": "ec2", - "virtualization": "pvm", - "credentials": { - // "access-key": null, - // "secret-key": null, - // "certificate": null, - // "private-key": null, - // "user-id": null - }, - - "bootstrapper": { - "workspace": "/target" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{virtualization}-{%y}{%m}{%d}", - "description": "Debian {system.release} {system.architecture} AMI", - "bucket": "debian-amis", - "region": "us-west-1" - }, - "system": { - "release": "wheezy", - "architecture": "amd64", - "bootloader": "pvgrub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": { - "mirror": "http://cloudfront.debian.net/debian" - }, - "volume": { - "backing": "s3", - "partitions": { - "type": "none", - "root": { - "size": "4GiB", - "filesystem": "ext4" - } - } - } -} diff --git a/manifests/ec2-s3.manifest.yml b/manifests/ec2-s3.manifest.yml new file mode 100644 index 0000000..bb837ff --- /dev/null +++ b/manifests/ec2-s3.manifest.yml @@ -0,0 +1,33 @@ +--- +provider: + name: ec2 + virtualization: pvm + # credentials: + # access-key: AFAKEACCESSKEYFORAWS + # secret-key: thes3cr3tkeyf0ryourawsaccount/FS4d8Qdva + # certificate: /path/to/your/certificate.pem + # private-key: /path/to/your/private.key + # user-id: arn:aws:iam::123456789012:user/iamuser +bootstrapper: + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{provider.virtualization}-{%y}{%m}{%d} + description: Debian {system.release} {system.architecture} AMI + bucket: debian-amis + region: us-west-1 +system: + release: wheezy + architecture: amd64 + bootloader: pvgrub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: s3 + partitions: + type: none + root: + filesystem: ext4 + size: 4GiB +packages: + mirror: http://cloudfront.debian.net/debian diff --git a/manifests/gce-backports.manifest.yml b/manifests/gce-backports.manifest.yml new file mode 100644 index 0000000..7c3ed8c --- /dev/null +++ b/manifests/gce-backports.manifest.yml @@ -0,0 +1,42 @@ +--- +provider: + name: gce +bootstrapper: + workspace: /target +image: + name: disk + description: Debian {system.release} {system.architecture} +system: + release: wheezy + architecture: amd64 + bootloader: grub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: raw + partitions: + type: msdos + root: + filesystem: ext4 + size: 10GiB +packages: + mirror: http://gce_debian_mirror.storage.googleapis.com/ + components: + - main + - contrib + - non-free + preferences: + backport-kernel: + - package: linux-image-* initramfs-tools + pin: release n=wheezy-backports + pin-priority: 500 + backport-ssh: + - package: init-system-helpers openssh-sftp-server openssh-client openssh-server + pin: release 
n=wheezy-backports + pin-priority: 500 +plugins: + google_cloud_sdk: {} + ntp: + servers: + - metadata.google.internal diff --git a/manifests/gce.manifest.json b/manifests/gce.manifest.json deleted file mode 100644 index 2c6fdad..0000000 --- a/manifests/gce.manifest.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "provider": "gce", - "bootstrapper": { - "workspace": "/target" - }, - "image": { - "name": "disk", - "description": "Debian {system.release} {system.architecture}" - }, - "system": { - "release": "wheezy", - "sections": ["main", "contrib", "non-free"], - "architecture": "amd64", - "bootloader": "grub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": { - "mirror": "http://gce_debian_mirror.storage.googleapis.com/", - "preferences": { - "backport-kernel": [ - { - "package": "linux-image-* initramfs-tools", - "pin": "release n=wheezy-backports", - "pin-priority": 500 - } - ] - } - }, - "plugins": { - "ntp": { - "servers": ["metadata.google.internal"] - } - }, - "volume": { - "backing": "raw", - "partitions": { - "type": "msdos", - "root": { - "size": "10GiB", - "filesystem": "ext4" - } - } - } -} diff --git a/manifests/gce.manifest.yml b/manifests/gce.manifest.yml new file mode 100644 index 0000000..bf9ba52 --- /dev/null +++ b/manifests/gce.manifest.yml @@ -0,0 +1,33 @@ +--- +provider: + name: gce +bootstrapper: + workspace: /target +image: + name: disk + description: Debian {system.release} {system.architecture} +system: + release: wheezy + architecture: amd64 + bootloader: grub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: raw + partitions: + type: msdos + root: + filesystem: ext4 + size: 10GiB +packages: + mirror: http://gce_debian_mirror.storage.googleapis.com/ + components: + - main + - contrib + - non-free +plugins: + google_cloud_sdk: {} + ntp: + servers: + - metadata.google.internal diff --git a/manifests/kvm-virtio.manifest.json b/manifests/kvm-virtio.manifest.json deleted file mode 100644 index a7a92b0..0000000 --- a/manifests/kvm-virtio.manifest.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "provider": "kvm", - "bootstrapper": { - "workspace": "/target", - "mirror": "http://ftp.fr.debian.org/debian/" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{%y}{%m}{%d}", - "description": "Debian {system.release} {system.architecture}" - }, - "system": { - "release": "wheezy", - "architecture": "amd64", - "bootloader": "grub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8", - "virtio_modules": [ "virtio_pci", "virtio_blk" ] - }, - "packages": {}, - "volume": { - "backing": "raw", - "partitions": { - "type": "msdos", - "boot": { - "size": "32MiB", - "filesystem": "ext2" - }, - "root": { - "size": "864MiB", - "filesystem": "ext4" - }, - "swap": {"size": "128MiB"} - } - }, - "plugins": { - "root_password": { - "password": "test" - } - } -} diff --git a/manifests/kvm-virtio.manifest.yml b/manifests/kvm-virtio.manifest.yml new file mode 100644 index 0000000..81e6c59 --- /dev/null +++ b/manifests/kvm-virtio.manifest.yml @@ -0,0 +1,34 @@ +--- +provider: + name: kvm + virtio_modules: + - virtio_pci + - virtio_blk +bootstrapper: + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{%y}{%m}{%d} + description: Debian {system.release} {system.architecture} +system: + release: wheezy + architecture: amd64 + bootloader: grub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: raw + partitions: + type: msdos + boot: + filesystem: ext2 + size: 32MiB + root: + 
filesystem: ext4 + size: 864MiB + swap: + size: 128MiB +packages: {} +plugins: + root_password: + password: test diff --git a/manifests/kvm.manifest.json b/manifests/kvm.manifest.json deleted file mode 100644 index ea4c24a..0000000 --- a/manifests/kvm.manifest.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "provider": "kvm", - "bootstrapper": { - "workspace": "/target", - "mirror": "http://ftp.fr.debian.org/debian/" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{%y}{%m}{%d}", - "description": "Debian {system.release} {system.architecture}" - }, - "system": { - "release": "wheezy", - "architecture": "amd64", - "bootloader": "grub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": {}, - "volume": { - "backing": "raw", - "partitions": { - "type": "msdos", - "boot": { - "size": "32MiB", - "filesystem": "ext2" - }, - "root": { - "size": "864MiB", - "filesystem": "ext4" - }, - "swap": {"size": "128MiB"} - } - }, - "plugins": { - "root_password": { - "password": "test" - } - } -} diff --git a/manifests/kvm.manifest.yml b/manifests/kvm.manifest.yml new file mode 100644 index 0000000..4b35e63 --- /dev/null +++ b/manifests/kvm.manifest.yml @@ -0,0 +1,31 @@ +--- +provider: + name: kvm +bootstrapper: + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{%y}{%m}{%d} + description: Debian {system.release} {system.architecture} +system: + release: wheezy + architecture: amd64 + bootloader: grub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: raw + partitions: + type: msdos + boot: + filesystem: ext2 + size: 32MiB + root: + filesystem: ext4 + size: 864MiB + swap: + size: 128MiB +packages: {} +plugins: + root_password: + password: test diff --git a/manifests/squeeze-ec2-ebs-debian-official-amd64-pvm.manifest.json b/manifests/squeeze-ec2-ebs-debian-official-amd64-pvm.manifest.json deleted file mode 100644 index 3be23f7..0000000 --- a/manifests/squeeze-ec2-ebs-debian-official-amd64-pvm.manifest.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "provider": "ec2", - "virtualization": "pvm", - "credentials": { - // "access-key": null, - // "secret-key": null - }, - - "bootstrapper": { - "workspace": "/target" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{virtualization}-{%Y}-{%m}-{%d}-ebs", - "description": "Debian {system.release} {system.architecture}" - }, - "system": { - "release": "squeeze", - "architecture": "amd64", - "bootloader": "pvgrub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": { - "mirror": "http://cloudfront.debian.net/debian" - }, - "volume": { - "backing": "ebs", - "partitions": { - "type": "none", - "root": { - "size": "8GiB", - "filesystem": "ext4" - } - } - }, - "plugins": { - "admin_user": { - "username": "admin" - } - } -} diff --git a/manifests/squeeze-ec2-ebs-debian-official-amd64-pvm.manifest.yml b/manifests/squeeze-ec2-ebs-debian-official-amd64-pvm.manifest.yml new file mode 100644 index 0000000..b36e832 --- /dev/null +++ b/manifests/squeeze-ec2-ebs-debian-official-amd64-pvm.manifest.yml @@ -0,0 +1,31 @@ +--- +provider: + name: ec2 + virtualization: pvm + # credentials: + # access-key: AFAKEACCESSKEYFORAWS + # secret-key: thes3cr3tkeyf0ryourawsaccount/FS4d8Qdva +bootstrapper: + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{provider.virtualization}-{%Y}-{%m}-{%d}-ebs + description: Debian {system.release} {system.architecture} +system: + release: squeeze + architecture: amd64 + 
bootloader: pvgrub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: ebs + partitions: + type: none + root: + filesystem: ext4 + size: 8GiB +packages: + mirror: http://cloudfront.debian.net/debian +plugins: + admin_user: + username: admin diff --git a/manifests/squeeze-ec2-ebs-debian-official-i386-pvm.manifest.json b/manifests/squeeze-ec2-ebs-debian-official-i386-pvm.manifest.json deleted file mode 100644 index aaa5669..0000000 --- a/manifests/squeeze-ec2-ebs-debian-official-i386-pvm.manifest.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "provider": "ec2", - "virtualization": "pvm", - "credentials": { - // "access-key": null, - // "secret-key": null - }, - - "bootstrapper": { - "workspace": "/target" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{virtualization}-{%Y}-{%m}-{%d}-ebs", - "description": "Debian {system.release} {system.architecture}" - }, - "system": { - "release": "squeeze", - "architecture": "i386", - "bootloader": "pvgrub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": { - "mirror": "http://cloudfront.debian.net/debian" - }, - "volume": { - "backing": "ebs", - "partitions": { - "type": "none", - "root": { - "size": "8GiB", - "filesystem": "ext4" - } - } - }, - "plugins": { - "admin_user": { - "username": "admin" - } - } -} diff --git a/manifests/squeeze-ec2-ebs-debian-official-i386-pvm.manifest.yml b/manifests/squeeze-ec2-ebs-debian-official-i386-pvm.manifest.yml new file mode 100644 index 0000000..539c2d3 --- /dev/null +++ b/manifests/squeeze-ec2-ebs-debian-official-i386-pvm.manifest.yml @@ -0,0 +1,31 @@ +--- +provider: + name: ec2 + virtualization: pvm + # credentials: + # access-key: AFAKEACCESSKEYFORAWS + # secret-key: thes3cr3tkeyf0ryourawsaccount/FS4d8Qdva +bootstrapper: + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{provider.virtualization}-{%Y}-{%m}-{%d}-ebs + description: Debian {system.release} {system.architecture} +system: + release: squeeze + architecture: i386 + bootloader: pvgrub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: ebs + partitions: + type: none + root: + filesystem: ext4 + size: 8GiB +packages: + mirror: http://cloudfront.debian.net/debian +plugins: + admin_user: + username: admin diff --git a/manifests/virtualbox-vagrant.manifest.json b/manifests/virtualbox-vagrant.manifest.json deleted file mode 100644 index 6641ac2..0000000 --- a/manifests/virtualbox-vagrant.manifest.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "provider": "virtualbox", - "bootstrapper": { - "workspace": "/target", - "guest_additions": "/root/images/VBoxGuestAdditions.iso" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{%y}{%m}{%d}", - "description": "Debian {system.release} {system.architecture}" - }, - "system": { - "release": "wheezy", - "architecture": "amd64", - "bootloader": "grub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8", - "hostname": "localhost" - }, - "packages": {}, - "volume": { - "backing": "vmdk", - "partitions": { - "type": "msdos", - "boot": { - "size": "64MiB", - "filesystem": "ext2" - }, - "root": { - "size": "1856MiB", - "filesystem": "ext4" - }, - "swap": {"size": "128MiB"} - } - }, - "plugins": { - "vagrant": { - } - } -} diff --git a/manifests/virtualbox-vagrant.manifest.yml b/manifests/virtualbox-vagrant.manifest.yml new file mode 100644 index 0000000..e440ca7 --- /dev/null +++ b/manifests/virtualbox-vagrant.manifest.yml @@ -0,0 +1,32 @@ +--- +provider: + name: virtualbox + 
guest_additions: /root/images/VBoxGuestAdditions.iso +bootstrapper: + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{%y}{%m}{%d} + description: Debian {system.release} {system.architecture} +system: + release: wheezy + architecture: amd64 + bootloader: grub + charmap: UTF-8 + hostname: localhost + locale: en_US + timezone: UTC +volume: + backing: vmdk + partitions: + type: msdos + boot: + filesystem: ext2 + size: 64MiB + root: + filesystem: ext4 + size: 1856MiB + swap: + size: 128MiB +packages: {} +plugins: + vagrant: {} diff --git a/manifests/virtualbox.manifest.json b/manifests/virtualbox.manifest.json deleted file mode 100644 index 062cc9c..0000000 --- a/manifests/virtualbox.manifest.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "provider": "virtualbox", - "bootstrapper": { - "workspace": "/target" - // "guest_additions": "/root/images/VBoxGuestAdditions.iso" - }, - "image": { - "name": "debian-{system.release}-{system.architecture}-{%y}{%m}{%d}", - "description": "Debian {system.release} {system.architecture}" - }, - "system": { - "release": "wheezy", - "architecture": "amd64", - "bootloader": "grub", - "timezone": "UTC", - "locale": "en_US", - "charmap": "UTF-8" - }, - "packages": {}, - "volume": { - "backing": "vdi", - "partitions": { - "type": "msdos", - "boot": { - "size": "32MiB", - "filesystem": "ext2" - }, - "root": { - "size": "864MiB", - "filesystem": "ext4" - }, - "swap": {"size": "128MiB"} - } - } -} diff --git a/manifests/virtualbox.manifest.yml b/manifests/virtualbox.manifest.yml new file mode 100644 index 0000000..6a1b358 --- /dev/null +++ b/manifests/virtualbox.manifest.yml @@ -0,0 +1,29 @@ +--- +provider: + name: virtualbox + guest_additions: /root/images/VBoxGuestAdditions.iso +bootstrapper: + workspace: /target +image: + name: debian-{system.release}-{system.architecture}-{%y}{%m}{%d} + description: Debian {system.release} {system.architecture} +system: + release: wheezy + architecture: amd64 + bootloader: grub + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + backing: vdi + partitions: + type: msdos + boot: + filesystem: ext2 + size: 32MiB + root: + filesystem: ext4 + size: 864MiB + swap: + size: 128MiB +packages: {} diff --git a/setup.py b/setup.py index e68979a..4d8493d 100644 --- a/setup.py +++ b/setup.py @@ -15,7 +15,10 @@ setup(name='bootstrap-vz', version=find_version(os.path.join(os.path.dirname(__file__), 'bootstrapvz/__init__.py')), packages=find_packages(), include_package_data=True, - entry_points={'console_scripts': ['bootstrap-vz = bootstrapvz.base:main']}, + entry_points={'console_scripts': ['bootstrap-vz = bootstrapvz.base:main', + 'bootstrap-vz-remote = bootstrapvz.remote.main:main', + 'bootstrap-vz-server = bootstrapvz.remote.server:main', + ]}, install_requires=['termcolor >= 1.1.0', 'fysom >= 1.0.15', 'jsonschema >= 2.3.0', diff --git a/tests/README.rst b/tests/README.rst new file mode 100644 index 0000000..c14730c --- /dev/null +++ b/tests/README.rst @@ -0,0 +1,22 @@ +The testing framework consists of two parts: +The unit tests and the integration tests. + +The `unit tests `__ are responsible for testing individual +parts of bootstrap-vz, while the `integration tests `__ test +entire manifests by bootstrapping and booting them. + +Selecting tests +--------------- +To run one specific test suite simply append the module path to tox: + +.. 
code:: sh + + $ tox -e unit tests.unit.releases_tests + +Specific tests can be selected by appending the function name with a colon +to the module path -- to run more than one test, simply attach more arguments. + + +.. code:: sh + + $ tox -e unit tests.unit.releases_tests:test_lt tests.unit.releases_tests:test_eq
diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29
diff --git a/tests/integration/README.rst b/tests/integration/README.rst new file mode 100644 index 0000000..01c4f0f --- /dev/null +++ b/tests/integration/README.rst @@ -0,0 +1,73 @@ +Integration tests +================= +`Integration tests`__ test +bootstrap-vz in its entirety. +This testing includes building images from manifests and +creating/booting said images. + +__ http://en.wikipedia.org/wiki/Integration_testing
+ +Since hardcoding manifests for each test, bootstrapping them and booting the +resulting images is too much code for a single test, a testing harness has +been developed that reduces each test to its bare essentials: + +* Combine available `manifest partials <#manifest-partials>`__ into a single manifest +* Boot an instance from a manifest +* Run tests on the booted instance
+ +In order for the integration testing harness to be able to bootstrap, it must +know about your `build-servers <../../bootstrapvz/remote#build-servers-yml>`__. +Depending on the manifest that is bootstrapped, the harness chooses +a fitting build-server, connects to it and starts the bootstrapping process. + +When running integration tests, the framework will look for ``build-servers.yml`` +at the root of the repo and raise an error if it is not found.
+ + +Manifest combinations +--------------------- +The tests mainly focus on varying key parts of an image +(e.g. partitioning, Debian release, bootloader, EC2 backing, EC2 virtualization method) +that have been problem areas. +Essentially the tests are the Cartesian product of these key parts.
+ + +Aborting a test +--------------- +You can press ``Ctrl+C`` at any time during the testing to abort - +the harness will automatically clean up any temporary resources and shut down +running instances. Pressing ``Ctrl+C`` a second time stops the cleanup and quits +immediately.
+ + +Manifest partials +----------------- +Instead of creating manifests from scratch for each single test, reusable parts +are factored out into partials in the manifest folder. +This allows code like this: + +.. code:: python + + partials = {'vdi': '{provider: {name: virtualbox}, volume: {backing: vdi}}', + 'vmdk': '{provider: {name: virtualbox}, volume: {backing: vmdk}}', + } + + def test_unpartitioned_extlinux_oldstable(): + std_partials = ['base', 'stable64', 'extlinux', 'unpartitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials)
+ +The code above produces a manifest for a Debian stable 64-bit unpartitioned +VirtualBox VMDK image. +``root_password`` is a special partial in that the actual password is +randomly generated on load.
+ + +Missing parts +------------- +The integration testing harness is in no way complete. + +* It still has no support for providers other than VirtualBox and EC2. +* Creating an SSH connection to a booted instance is cumbersome and does not + happen in any of the tests - this would be particularly useful when manifests + are to be tested beyond whether they boot up.
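To make the Cartesian-product idea described above concrete, here is a minimal sketch of how the partials added in this changeset could be combined programmatically. It only relies on ``merge_manifest_data`` and the partial names introduced further down in this diff (``base``, ``stable64``, ``grub`` and so on); the selection lists and the ``itertools.product`` loop are purely illustrative assumptions - the shipped test modules (for example ``virtualbox_tests.py``) spell out each combination as an explicit test function instead of generating them.

.. code:: python

    import itertools

    # merge_manifest_data is defined in tests/integration/manifests/__init__.py
    from manifests import merge_manifest_data

    # Illustrative axes; the names correspond to the partial manifests
    # added under tests/integration/manifests/ in this changeset.
    releases = ['oldstable64', 'stable64', 'unstable64']
    bootloaders = ['extlinux', 'grub']
    layouts = [['unpartitioned'], ['msdos', 'partitioned'], ['gpt', 'partitioned']]

    # Custom partial, the same YAML string used in virtualbox_tests.py
    vmdk = '{provider: {name: virtualbox}, volume: {backing: vmdk}}'

    for release, bootloader, layout in itertools.product(releases, bootloaders, layouts):
        std_partials = ['base', release, bootloader] + layout + ['root_password']
        # merge_manifest_data returns a plain dict, ready to be handed to
        # boot_manifest() from tests/integration/tools
        manifest_data = merge_manifest_data(std_partials, [vmdk])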
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/integration/ec2_ebs_hvm_tests.py b/tests/integration/ec2_ebs_hvm_tests.py new file mode 100644 index 0000000..5cda8cd --- /dev/null +++ b/tests/integration/ec2_ebs_hvm_tests.py @@ -0,0 +1,129 @@ +from manifests import merge_manifest_data +from tools import boot_manifest + +partials = {'ebs_hvm': ''' +provider: + name: ec2 + virtualization: hvm +volume: {backing: ebs} +''', + 'extlinux': 'system: {bootloader: extlinux}', + 'grub': 'system: {bootloader: grub}', + } + + +def test_unpartitioned_extlinux_oldstable(): + std_partials = ['base', 'oldstable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_msdos_extlinux_oldstable(): + std_partials = ['base', 'oldstable64', 'msdos', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_gpt_extlinux_oldstable(): + std_partials = ['base', 'oldstable64', 'gpt', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_unpartitioned_extlinux_stable(): + std_partials = ['base', 'stable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_msdos_extlinux_stable(): + std_partials = ['base', 'stable64', 'msdos', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_gpt_extlinux_stable(): + std_partials = ['base', 'stable64', 'gpt', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_msdos_grub_stable(): + std_partials = ['base', 'stable64', 'msdos', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['grub']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_gpt_grub_stable(): + std_partials = ['base', 'stable64', 'gpt', 'single_partition', 
'root_password'] + custom_partials = [partials['ebs_hvm'], partials['grub']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_unpartitioned_extlinux_unstable(): + std_partials = ['base', 'unstable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_msdos_extlinux_unstable(): + std_partials = ['base', 'unstable64', 'msdos', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_gpt_extlinux_unstable(): + std_partials = ['base', 'unstable64', 'gpt', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['extlinux']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_msdos_grub_unstable(): + std_partials = ['base', 'unstable64', 'msdos', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['grub']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_gpt_grub_unstable(): + std_partials = ['base', 'unstable64', 'gpt', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_hvm'], partials['grub']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't2.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) diff --git a/tests/integration/ec2_ebs_pvm_tests.py b/tests/integration/ec2_ebs_pvm_tests.py new file mode 100644 index 0000000..3c05e11 --- /dev/null +++ b/tests/integration/ec2_ebs_pvm_tests.py @@ -0,0 +1,92 @@ +from manifests import merge_manifest_data +from tools import boot_manifest + +partials = {'ebs_pvm': ''' +provider: + name: ec2 + virtualization: pvm +system: {bootloader: pvgrub} +volume: {backing: ebs} +''' + } + + +def test_unpartitioned_oldstable(): + std_partials = ['base', 'oldstable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_msdos_oldstable(): + std_partials = ['base', 'oldstable64', 'msdos', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_gpt_oldstable(): + std_partials = 
['base', 'oldstable64', 'gpt', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_unpartitioned_stable(): + std_partials = ['base', 'stable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_msdos_stable(): + std_partials = ['base', 'stable64', 'msdos', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_gpt_stable(): + std_partials = ['base', 'stable64', 'gpt', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_unpartitioned_unstable(): + std_partials = ['base', 'unstable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_msdos_unstable(): + std_partials = ['base', 'unstable64', 'msdos', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_gpt_unstable(): + std_partials = ['base', 'unstable64', 'gpt', 'single_partition', 'root_password'] + custom_partials = [partials['ebs_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 't1.micro'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) diff --git a/tests/integration/ec2_s3_pvm_tests.py b/tests/integration/ec2_s3_pvm_tests.py new file mode 100644 index 0000000..da64923 --- /dev/null +++ b/tests/integration/ec2_s3_pvm_tests.py @@ -0,0 +1,41 @@ +from manifests import merge_manifest_data +from tools import boot_manifest +import random + +s3_bucket_name = '{id:x}'.format(id=random.randrange(16 ** 16)) +partials = {'s3_pvm': ''' +provider: + name: ec2 + virtualization: pvm +image: {bucket: ''' + s3_bucket_name + '''} +system: {bootloader: pvgrub} +volume: {backing: s3} +''' + } + + +def test_unpartitioned_oldstable(): + std_partials = ['base', 'oldstable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['s3_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 'm1.small'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_unpartitioned_stable(): + std_partials = 
['base', 'stable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['s3_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 'm1.small'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) + + +def test_unpartitioned_unstable(): + std_partials = ['base', 'unstable64', 'unpartitioned', 'root_password'] + custom_partials = [partials['s3_pvm']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + boot_vars = {'instance_type': 'm1.small'} + with boot_manifest(manifest_data, boot_vars) as instance: + print(instance.get_console_output().output) diff --git a/tests/integration/manifests/__init__.py b/tests/integration/manifests/__init__.py new file mode 100644 index 0000000..d6afbd6 --- /dev/null +++ b/tests/integration/manifests/__init__.py @@ -0,0 +1,54 @@ +import os.path +import glob +from bootstrapvz.common.tools import load_data + +partial_json = glob.glob(os.path.join(os.path.dirname(__file__), '*.yml')) +partial_yaml = glob.glob(os.path.join(os.path.dirname(__file__), '*.json')) + +partials = {} +for path in partial_json + partial_yaml: + key = os.path.splitext(os.path.basename(path))[0] + if key in partials: + msg = 'Error when loading partial manifests: The partial {key} exists twice'.format(key=key) + raise Exception(msg) + partials[key] = load_data(path) + +import random +import string +pool = string.ascii_uppercase + string.ascii_lowercase + string.digits +random_password = ''.join(random.choice(pool) for _ in range(16)) +partials['root_password']['plugins']['root_password']['password'] = random_password + + +def merge_manifest_data(standard_partials=[], custom=[]): + import yaml + manifest_data = [partials[name] for name in standard_partials] + manifest_data.extend(yaml.load(data) for data in custom) + return merge_dicts(*manifest_data) + + +# Snatched from here: http://stackoverflow.com/a/7205107 +def merge_dicts(*args): + def clone(obj): + copy = obj + if isinstance(obj, dict): + copy = {key: clone(value) for key, value in obj.iteritems()} + if isinstance(obj, list): + copy = [clone(value) for value in obj] + if isinstance(obj, set): + copy = set([clone(value) for value in obj]) + return copy + + def merge(a, b, path=[]): + for key in b: + if key in a: + if isinstance(a[key], dict) and isinstance(b[key], dict): + merge(a[key], b[key], path + [str(key)]) + elif a[key] == b[key]: + pass + else: + raise Exception('Conflict at `{path}\''.format(path='.'.join(path + [str(key)]))) + else: + a[key] = clone(b[key]) + return a + return reduce(merge, args, {}) diff --git a/tests/integration/manifests/base.yml b/tests/integration/manifests/base.yml new file mode 100644 index 0000000..3dec767 --- /dev/null +++ b/tests/integration/manifests/base.yml @@ -0,0 +1,15 @@ +--- +provider: {} +bootstrapper: + workspace: /target + tarball: true +image: + name: deb-{system.release}-{system.architecture}-{system.bootloader}-{volume.partitions.type}-{%y}{%m}{%d} + description: Debian {system.release} {system.architecture} +system: + charmap: UTF-8 + locale: en_US + timezone: UTC +volume: + partitions: {} +packages: {} diff --git a/tests/integration/manifests/extlinux.yml b/tests/integration/manifests/extlinux.yml new file mode 100644 index 0000000..13a8a43 --- /dev/null +++ b/tests/integration/manifests/extlinux.yml @@ -0,0 +1,3 @@ +--- +system: + bootloader: extlinux diff --git a/tests/integration/manifests/gpt.yml b/tests/integration/manifests/gpt.yml new file 
mode 100644 index 0000000..1ece4c2 --- /dev/null +++ b/tests/integration/manifests/gpt.yml @@ -0,0 +1,4 @@ +--- +volume: + partitions: + type: gpt diff --git a/tests/integration/manifests/grub.yml b/tests/integration/manifests/grub.yml new file mode 100644 index 0000000..4dfa67f --- /dev/null +++ b/tests/integration/manifests/grub.yml @@ -0,0 +1,3 @@ +--- +system: + bootloader: grub diff --git a/tests/integration/manifests/msdos.yml b/tests/integration/manifests/msdos.yml new file mode 100644 index 0000000..5795f95 --- /dev/null +++ b/tests/integration/manifests/msdos.yml @@ -0,0 +1,4 @@ +--- +volume: + partitions: + type: msdos diff --git a/tests/integration/manifests/oldstable64.yml b/tests/integration/manifests/oldstable64.yml new file mode 100644 index 0000000..3281ca7 --- /dev/null +++ b/tests/integration/manifests/oldstable64.yml @@ -0,0 +1,4 @@ +--- +system: + release: oldstable + architecture: amd64 diff --git a/tests/integration/manifests/partitioned.yml b/tests/integration/manifests/partitioned.yml new file mode 100644 index 0000000..c44eab1 --- /dev/null +++ b/tests/integration/manifests/partitioned.yml @@ -0,0 +1,11 @@ +--- +volume: + partitions: + boot: + filesystem: ext2 + size: 64MiB + root: + filesystem: ext4 + size: 832MiB + swap: + size: 128MiB diff --git a/tests/integration/manifests/root_password.yml b/tests/integration/manifests/root_password.yml new file mode 100644 index 0000000..fdc826a --- /dev/null +++ b/tests/integration/manifests/root_password.yml @@ -0,0 +1,4 @@ +--- +plugins: + root_password: + password: random password set by the partial manifest loader diff --git a/tests/integration/manifests/single_partition.yml b/tests/integration/manifests/single_partition.yml new file mode 100644 index 0000000..ce4c247 --- /dev/null +++ b/tests/integration/manifests/single_partition.yml @@ -0,0 +1,6 @@ +--- +volume: + partitions: + root: + filesystem: ext4 + size: 1GiB diff --git a/tests/integration/manifests/stable64.yml b/tests/integration/manifests/stable64.yml new file mode 100644 index 0000000..8f3cb82 --- /dev/null +++ b/tests/integration/manifests/stable64.yml @@ -0,0 +1,4 @@ +--- +system: + release: stable + architecture: amd64 diff --git a/tests/integration/manifests/stable86.yml b/tests/integration/manifests/stable86.yml new file mode 100644 index 0000000..1e6def2 --- /dev/null +++ b/tests/integration/manifests/stable86.yml @@ -0,0 +1,4 @@ +--- +system: + release: stable + architecture: x86 diff --git a/tests/integration/manifests/unpartitioned.yml b/tests/integration/manifests/unpartitioned.yml new file mode 100644 index 0000000..4f881ba --- /dev/null +++ b/tests/integration/manifests/unpartitioned.yml @@ -0,0 +1,7 @@ +--- +volume: + partitions: + type: none + root: + filesystem: ext4 + size: 1GiB diff --git a/tests/integration/manifests/unstable64.yml b/tests/integration/manifests/unstable64.yml new file mode 100644 index 0000000..4cdf9eb --- /dev/null +++ b/tests/integration/manifests/unstable64.yml @@ -0,0 +1,4 @@ +--- +system: + release: unstable + architecture: amd64 diff --git a/tests/integration/providers/README.rst b/tests/integration/providers/README.rst new file mode 100644 index 0000000..9452950 --- /dev/null +++ b/tests/integration/providers/README.rst @@ -0,0 +1,2 @@ +Integration test providers +========================== diff --git a/tests/integration/providers/__init__.py b/tests/integration/providers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/integration/providers/ec2/__init__.py 
b/tests/integration/providers/ec2/__init__.py new file mode 100644 index 0000000..39c044f --- /dev/null +++ b/tests/integration/providers/ec2/__init__.py @@ -0,0 +1,139 @@ +from contextlib import contextmanager +from tests.integration.tools import waituntil +import logging +log = logging.getLogger(__name__) + + +@contextmanager +def prepare_bootstrap(manifest, build_server): + if manifest.volume['backing'] == 's3': + credentials = {'access-key': build_server.build_settings['ec2-credentials']['access-key'], + 'secret-key': build_server.build_settings['ec2-credentials']['secret-key']} + from boto.s3 import connect_to_region as s3_connect + s3_connection = s3_connect(manifest.image['region'], + aws_access_key_id=credentials['access-key'], + aws_secret_access_key=credentials['secret-key']) + log.debug('Creating S3 bucket') + bucket = s3_connection.create_bucket(manifest.image['bucket'], location=manifest.image['region']) + try: + yield + finally: + log.debug('Deleting S3 bucket') + for item in bucket.list(): + bucket.delete_key(item.key) + s3_connection.delete_bucket(manifest.image['bucket']) + else: + yield + + +@contextmanager +def boot_image(manifest, build_server, bootstrap_info, instance_type=None): + + credentials = {'access-key': build_server.build_settings['ec2-credentials']['access-key'], + 'secret-key': build_server.build_settings['ec2-credentials']['secret-key']} + from boto.ec2 import connect_to_region as ec2_connect + ec2_connection = ec2_connect(bootstrap_info._ec2['region'], + aws_access_key_id=credentials['access-key'], + aws_secret_access_key=credentials['secret-key']) + from boto.vpc import connect_to_region as vpc_connect + vpc_connection = vpc_connect(bootstrap_info._ec2['region'], + aws_access_key_id=credentials['access-key'], + aws_secret_access_key=credentials['secret-key']) + + if manifest.volume['backing'] == 'ebs': + from images import EBSImage + image = EBSImage(bootstrap_info._ec2['image'], ec2_connection) + if manifest.volume['backing'] == 's3': + from images import S3Image + image = S3Image(bootstrap_info._ec2['image'], ec2_connection) + + try: + with run_instance(image, manifest, instance_type, ec2_connection, vpc_connection) as instance: + yield instance + finally: + image.destroy() + + +@contextmanager +def run_instance(image, manifest, instance_type, ec2_connection, vpc_connection): + + with create_env(ec2_connection, vpc_connection) as boot_env: + + def waituntil_instance_is(state): + def instance_has_state(): + instance.update() + return instance.state == state + return waituntil(instance_has_state, timeout=600, interval=3) + + instance = None + try: + log.debug('Booting ec2 instance') + reservation = image.ami.run(instance_type=instance_type, + subnet_id=boot_env['subnet_id']) + [instance] = reservation.instances + instance.add_tag('Name', 'bootstrap-vz test instance') + + if not waituntil_instance_is('running'): + raise EC2InstanceStartupException('Timeout while booting instance') + + if not waituntil(lambda: instance.get_console_output().output is not None, timeout=600, interval=3): + raise EC2InstanceStartupException('Timeout while fetching console output') + + from bootstrapvz.common.releases import wheezy + if manifest.release <= wheezy: + termination_string = 'INIT: Entering runlevel: 2' + else: + termination_string = 'Debian GNU/Linux' + + console_output = instance.get_console_output().output + if termination_string not in console_output: + last_lines = '\n'.join(console_output.split('\n')[-50:]) + message = ('The instance did not boot properly.\n' + 
'Last 50 lines of console output:\n{output}'.format(output=last_lines)) + raise EC2InstanceStartupException(message) + + yield instance + finally: + if instance is not None: + log.debug('Terminating ec2 instance') + instance.terminate() + if not waituntil_instance_is('terminated'): + raise EC2InstanceStartupException('Timeout while terminating instance') + # wait a little longer, aws can be a little slow sometimes and think the instance is still running + import time + time.sleep(15) + + +@contextmanager +def create_env(ec2_connection, vpc_connection): + + vpc_cidr = '10.0.0.0/28' + subnet_cidr = '10.0.0.0/28' + + @contextmanager + def vpc(): + log.debug('Creating VPC') + vpc = vpc_connection.create_vpc(vpc_cidr) + try: + yield vpc + finally: + log.debug('Deleting VPC') + vpc_connection.delete_vpc(vpc.id) + + @contextmanager + def subnet(vpc): + log.debug('Creating subnet') + subnet = vpc_connection.create_subnet(vpc.id, subnet_cidr) + try: + yield subnet + finally: + log.debug('Deleting subnet') + vpc_connection.delete_subnet(subnet.id) + + with vpc() as _vpc: + with subnet(_vpc) as _subnet: + yield {'subnet_id': _subnet.id} + + +class EC2InstanceStartupException(Exception): + pass diff --git a/tests/integration/providers/ec2/images.py b/tests/integration/providers/ec2/images.py new file mode 100644 index 0000000..6375f8a --- /dev/null +++ b/tests/integration/providers/ec2/images.py @@ -0,0 +1,27 @@ +import logging +log = logging.getLogger(__name__) + + +class AmazonMachineImage(object): + + def __init__(self, image_id, ec2_connection): + self.ec2_connection = ec2_connection + self.ami = self.ec2_connection.get_image(image_id) + + +class EBSImage(AmazonMachineImage): + + def destroy(self): + log.debug('Deleting AMI') + self.ami.deregister() + for device, block_device_type in self.ami.block_device_mapping.items(): + self.ec2_connection.delete_snapshot(block_device_type.snapshot_id) + del self.ami + + +class S3Image(AmazonMachineImage): + + def destroy(self): + log.debug('Deleting AMI') + self.ami.deregister() + del self.ami diff --git a/tests/integration/providers/virtualbox/__init__.py b/tests/integration/providers/virtualbox/__init__.py new file mode 100644 index 0000000..f9460da --- /dev/null +++ b/tests/integration/providers/virtualbox/__init__.py @@ -0,0 +1,55 @@ +from contextlib import contextmanager +import logging +log = logging.getLogger(__name__) + + +@contextmanager +def boot_image(manifest, build_server, bootstrap_info): + from bootstrapvz.remote.build_servers.local import LocalBuildServer + if isinstance(build_server, LocalBuildServer): + image_path = bootstrap_info.volume.image_path + else: + import tempfile + handle, image_path = tempfile.mkstemp() + import os + os.close(handle) + try: + build_server.download(bootstrap_info.volume.image_path, image_path) + except (Exception, KeyboardInterrupt): + os.remove(image_path) + raise + finally: + build_server.delete(bootstrap_info.volume.image_path) + + from image import VirtualBoxImage + image = VirtualBoxImage(image_path) + + import hashlib + image_hash = hashlib.sha1(image_path).hexdigest() + instance_name = 'bootstrap-vz-{hash}'.format(hash=image_hash[:8]) + + try: + image.open() + try: + with run_instance(image, instance_name, manifest) as instance: + yield instance + finally: + image.close() + finally: + image.destroy() + + +@contextmanager +def run_instance(image, instance_name, manifest): + from instance import VirtualBoxInstance + instance = VirtualBoxInstance(image, instance_name, + manifest.system['architecture'], 
manifest.release) + try: + instance.create() + try: + instance.boot() + yield instance + finally: + instance.shutdown() + finally: + instance.destroy() diff --git a/tests/integration/providers/virtualbox/image.py b/tests/integration/providers/virtualbox/image.py new file mode 100644 index 0000000..7d51eb8 --- /dev/null +++ b/tests/integration/providers/virtualbox/image.py @@ -0,0 +1,27 @@ +import virtualbox +import logging +log = logging.getLogger(__name__) + + +class VirtualBoxImage(object): + + def __init__(self, image_path): + self.image_path = image_path + self.vbox = virtualbox.VirtualBox() + + def open(self): + log.debug('Opening vbox medium `{path}\''.format(path=self.image_path)) + self.medium = self.vbox.open_medium(self.image_path, # location + virtualbox.library.DeviceType.hard_disk, # device_type + virtualbox.library.AccessMode.read_only, # access_mode + False) # force_new_uuid + + def close(self): + log.debug('Closing vbox medium `{path}\''.format(path=self.image_path)) + self.medium.close() + + def destroy(self): + log.debug('Deleting vbox image `{path}\''.format(path=self.image_path)) + import os + os.remove(self.image_path) + del self.image_path diff --git a/tests/integration/providers/virtualbox/instance.py b/tests/integration/providers/virtualbox/instance.py new file mode 100644 index 0000000..5ab28da --- /dev/null +++ b/tests/integration/providers/virtualbox/instance.py @@ -0,0 +1,121 @@ +import virtualbox +from contextlib import contextmanager +from tests.integration.tools import waituntil +import logging +log = logging.getLogger(__name__) + + +class VirtualBoxInstance(object): + + cpus = 1 + memory = 256 + + def __init__(self, image, name, arch, release): + self.image = image + self.name = name + self.arch = arch + self.release = release + self.vbox = virtualbox.VirtualBox() + manager = virtualbox.Manager() + self.session = manager.get_session() + + def create(self): + log.debug('Creating vbox machine `{name}\''.format(name=self.name)) + # create machine + os_type = {'x86': 'Debian', + 'amd64': 'Debian_64'}.get(self.arch) + self.machine = self.vbox.create_machine(settings_file='', name=self.name, + groups=[], os_type_id=os_type, flags='') + self.machine.cpu_count = self.cpus + self.machine.memory_size = self.memory + self.machine.save_settings() # save settings, so that we can register it + self.vbox.register_machine(self.machine) + + # attach image + log.debug('Attaching SATA storage controller to vbox machine `{name}\''.format(name=self.name)) + with lock(self.machine, self.session) as machine: + strg_ctrl = machine.add_storage_controller('SATA Controller', + virtualbox.library.StorageBus.sata) + strg_ctrl.port_count = 1 + machine.attach_device(name='SATA Controller', controller_port=0, device=0, + type_p=virtualbox.library.DeviceType.hard_disk, + medium=self.image.medium) + machine.save_settings() + + # redirect serial port + log.debug('Enabling serial port on vbox machine `{name}\''.format(name=self.name)) + with lock(self.machine, self.session) as machine: + serial_port = machine.get_serial_port(0) + serial_port.enabled = True + import tempfile + handle, self.serial_port_path = tempfile.mkstemp() + import os + os.close(handle) + serial_port.path = self.serial_port_path + serial_port.host_mode = virtualbox.library.PortMode.host_pipe + serial_port.server = True # Create the socket on startup + machine.save_settings() + + def boot(self): + log.debug('Booting vbox machine `{name}\''.format(name=self.name)) + self.machine.launch_vm_process(self.session, 
'headless').wait_for_completion(-1) + from tests.integration.tools import read_from_socket + # Gotta figure out a more reliable way to check when the system is done booting. + # Maybe bootstrapped unit test images should have a startup script that issues + # a callback to the host. + from bootstrapvz.common.releases import wheezy + if self.release <= wheezy: + termination_string = 'INIT: Entering runlevel: 2' + else: + termination_string = 'Debian GNU/Linux' + self.console_output = read_from_socket(self.serial_port_path, termination_string, 120) + + def shutdown(self): + log.debug('Shutting down vbox machine `{name}\''.format(name=self.name)) + self.session.console.power_down().wait_for_completion(-1) + if not waituntil(lambda: self.machine.session_state == virtualbox.library.SessionState.unlocked): + raise LockingException('Timeout while waiting for the machine to become unlocked') + + def destroy(self): + log.debug('Destroying vbox machine `{name}\''.format(name=self.name)) + if hasattr(self, 'machine'): + try: + log.debug('Detaching SATA storage controller from vbox machine `{name}\''.format(name=self.name)) + with lock(self.machine, self.session) as machine: + machine.detach_device(name='SATA Controller', controller_port=0, device=0) + machine.save_settings() + except virtualbox.library.VBoxErrorObjectNotFound: + pass + log.debug('Unregistering and removing vbox machine `{name}\''.format(name=self.name)) + self.machine.unregister(virtualbox.library.CleanupMode.unregister_only) + self.machine.remove(delete=True) + else: + log.debug('vbox machine `{name}\' was not created, skipping destruction'.format(name=self.name)) + + +@contextmanager +def lock(machine, session): + if machine.session_state != virtualbox.library.SessionState.unlocked: + msg = ('Acquiring lock on machine failed, state was `{state}\' ' + 'instead of `Unlocked\'.'.format(state=str(machine.session_state))) + raise LockingException(msg) + + machine.lock_machine(session, virtualbox.library.LockType.write) + yield session.machine + + if machine.session_state != virtualbox.library.SessionState.locked: + if not waituntil(lambda: machine.session_state == virtualbox.library.SessionState.unlocked): + msg = ('Error before trying to release lock on machine, state was `{state}\' ' + 'instead of `Locked\'.'.format(state=str(machine.session_state))) + raise LockingException(msg) + + session.unlock_machine() + + if not waituntil(lambda: machine.session_state == virtualbox.library.SessionState.unlocked): + msg = ('Timeout while trying to release lock on machine, ' + 'last state was `{state}\''.format(state=str(machine.session_state))) + raise LockingException(msg) + + +class LockingException(Exception): + pass diff --git a/tests/integration/tools/__init__.py b/tests/integration/tools/__init__.py new file mode 100644 index 0000000..1e82230 --- /dev/null +++ b/tests/integration/tools/__init__.py @@ -0,0 +1,89 @@ +from contextlib import contextmanager +from bootstrapvz.remote import register_deserialization_handlers +import logging +log = logging.getLogger(__name__) + +# Register deserialization handlers for objects +# that will pass between server and client +register_deserialization_handlers() + + +@contextmanager +def boot_manifest(manifest_data, boot_vars={}): + from bootstrapvz.common.tools import load_data + build_servers = load_data('build-servers.yml') + from bootstrapvz.remote.build_servers import pick_build_server + build_server = pick_build_server(build_servers, manifest_data) + + manifest_data = 
build_server.apply_build_settings(manifest_data) + from bootstrapvz.base.manifest import Manifest + manifest = Manifest(data=manifest_data) + + import importlib + provider_module = importlib.import_module('tests.integration.providers.' + manifest.provider['name']) + + prepare_bootstrap = getattr(provider_module, 'prepare_bootstrap', noop) + with prepare_bootstrap(manifest, build_server): + bootstrap_info = None + log.info('Connecting to build server') + with build_server.connect() as connection: + log.info('Building manifest') + bootstrap_info = connection.run(manifest) + + log.info('Creating and booting instance') + with provider_module.boot_image(manifest, build_server, bootstrap_info, **boot_vars) as instance: + yield instance + + +def waituntil(predicate, timeout=5, interval=0.05): + import time + threshhold = time.time() + timeout + while time.time() < threshhold: + if predicate(): + return True + time.sleep(interval) + return False + + +def read_from_socket(socket_path, termination_string, timeout, read_timeout=0.5): + import socket + import select + import errno + console = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + console.connect(socket_path) + console.setblocking(0) + + from timeit import default_timer + start = default_timer() + + output = '' + ptr = 0 + continue_select = True + while continue_select: + read_ready, _, _ = select.select([console], [], [], read_timeout) + if console in read_ready: + while True: + try: + output += console.recv(1024) + if termination_string in output[ptr:]: + continue_select = False + else: + ptr = len(output) - len(termination_string) + break + except socket.error, e: + if e.errno != errno.EWOULDBLOCK: + raise Exception(e) + continue_select = False + if default_timer() - start > timeout: + from exceptions import SocketReadTimeout + msg = ('Reading from socket `{path}\' timed out after {seconds} seconds.\n' + 'Here is the output so far:\n{output}' + .format(path=socket_path, seconds=timeout, output=output)) + raise SocketReadTimeout(msg) + console.close() + return output + + +@contextmanager +def noop(*args, **kwargs): + yield diff --git a/tests/integration/tools/exceptions.py b/tests/integration/tools/exceptions.py new file mode 100644 index 0000000..830c625 --- /dev/null +++ b/tests/integration/tools/exceptions.py @@ -0,0 +1,4 @@ + + +class SocketReadTimeout(Exception): + pass diff --git a/tests/integration/virtualbox_tests.py b/tests/integration/virtualbox_tests.py new file mode 100644 index 0000000..3453444 --- /dev/null +++ b/tests/integration/virtualbox_tests.py @@ -0,0 +1,111 @@ +from manifests import merge_manifest_data +from tools import boot_manifest +from unittest.case import SkipTest + +partials = {'vdi': '{provider: {name: virtualbox}, volume: {backing: vdi}}', + 'vmdk': '{provider: {name: virtualbox}, volume: {backing: vmdk}}', + } + + +def test_unpartitioned_extlinux_oldstable(): + std_partials = ['base', 'oldstable64', 'extlinux', 'unpartitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) + + +def test_msdos_extlinux_oldstable(): + std_partials = ['base', 'oldstable64', 'extlinux', 'msdos', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) + + +def test_gpt_extlinux_oldstable(): + std_partials 
= ['base', 'oldstable64', 'extlinux', 'gpt', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) + + +def test_unpartitioned_extlinux_stable(): + std_partials = ['base', 'stable64', 'extlinux', 'unpartitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) + + +def test_msdos_extlinux_stable(): + std_partials = ['base', 'stable64', 'extlinux', 'msdos', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) + + +def test_gpt_extlinux_stable(): + std_partials = ['base', 'stable64', 'extlinux', 'gpt', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) + + +def test_msdos_grub_stable(): + std_partials = ['base', 'stable64', 'grub', 'msdos', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) + + +def test_gpt_grub_stable(): + std_partials = ['base', 'stable64', 'grub', 'gpt', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) + + +def test_unpartitioned_extlinux_unstable(): + std_partials = ['base', 'unstable64', 'extlinux', 'unpartitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) + + +def test_msdos_extlinux_unstable(): + std_partials = ['base', 'unstable64', 'extlinux', 'msdos', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) + + +def test_gpt_extlinux_unstable(): + std_partials = ['base', 'unstable64', 'extlinux', 'gpt', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) + + +def test_msdos_grub_unstable(): + std_partials = ['base', 'unstable64', 'grub', 'msdos', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) + + +def test_gpt_grub_unstable(): + std_partials = ['base', 'unstable64', 'grub', 'gpt', 'partitioned', 'root_password'] + custom_partials = [partials['vmdk']] + manifest_data = merge_manifest_data(std_partials, custom_partials) + with boot_manifest(manifest_data) as instance: + print(instance.console_output) diff --git a/tests/unit/README.rst b/tests/unit/README.rst new file mode 100644 index 0000000..a251ed9 --- 
--- /dev/null
+++ b/tests/unit/README.rst
@@ -0,0 +1,2 @@
+Unit tests
+==========
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/unit/bytes_tests.py b/tests/unit/bytes_tests.py
new file mode 100644
index 0000000..7c95797
--- /dev/null
+++ b/tests/unit/bytes_tests.py
@@ -0,0 +1,108 @@
+from nose.tools import eq_
+from nose.tools import raises
+from bootstrapvz.common.bytes import Bytes
+from bootstrapvz.common.exceptions import UnitError
+
+
+def test_lt():
+    assert Bytes('1MiB') < Bytes('2MiB')
+
+
+def test_le():
+    assert Bytes('1MiB') <= Bytes('2MiB')
+    assert Bytes('1MiB') <= Bytes('1MiB')
+
+
+def test_eq():
+    eq_(Bytes('1MiB'), Bytes('1MiB'))
+
+
+def test_neq():
+    assert Bytes('15MiB') != Bytes('1MiB')
+
+
+def test_gt():
+    assert Bytes('2MiB') > Bytes('1MiB')
+
+
+def test_ge():
+    assert Bytes('2MiB') >= Bytes('1MiB')
+    assert Bytes('2MiB') >= Bytes('2MiB')
+
+
+def test_eq_unit():
+    eq_(Bytes('1024MiB'), Bytes('1GiB'))
+
+
+def test_add():
+    eq_(Bytes('2GiB'), Bytes('1GiB') + Bytes('1GiB'))
+
+
+def test_iadd():
+    b = Bytes('1GiB')
+    b += Bytes('1GiB')
+    eq_(Bytes('2GiB'), b)
+
+
+def test_sub():
+    eq_(Bytes('1GiB'), Bytes('2GiB') - Bytes('1GiB'))
+
+
+def test_isub():
+    b = Bytes('2GiB')
+    b -= Bytes('1GiB')
+    eq_(Bytes('1GiB'), b)
+
+
+def test_mul():
+    eq_(Bytes('2GiB'), Bytes('1GiB') * 2)
+
+
+@raises(UnitError)
+def test_mul_bytes():
+    Bytes('1GiB') * Bytes('1GiB')
+
+
+def test_imul():
+    b = Bytes('1GiB')
+    b *= 2
+    eq_(Bytes('2GiB'), b)
+
+
+def test_div():
+    eq_(Bytes('1GiB'), Bytes('2GiB') / 2)
+
+
+def test_div_bytes():
+    eq_(2, Bytes('2GiB') / Bytes('1GiB'))
+
+
+def test_idiv():
+    b = Bytes('2GiB')
+    b /= 2
+    eq_(Bytes('1GiB'), b)
+
+
+def test_mod():
+    eq_(Bytes('256MiB'), Bytes('1GiB') % Bytes('768MiB'))
+
+
+@raises(UnitError)
+def test_mod_int():
+    Bytes('1GiB') % 768
+
+
+def test_imod():
+    b = Bytes('1GiB')
+    b %= Bytes('768MiB')
+    eq_(Bytes('256MiB'), b)
+
+
+@raises(UnitError)
+def test_imod_int():
+    b = Bytes('1GiB')
+    b %= 5
+
+
+def test_convert_int():
+    eq_(pow(1024, 3), int(Bytes('1GiB')))
diff --git a/tests/integration/manifests_tests.py b/tests/unit/manifests_tests.py
similarity index 65%
rename from tests/integration/manifests_tests.py
rename to tests/unit/manifests_tests.py
index 74a89d7..a3e0152 100644
--- a/tests/integration/manifests_tests.py
+++ b/tests/unit/manifests_tests.py
@@ -1,12 +1,6 @@
-import os
 from nose.tools import assert_true
 from bootstrapvz.base.manifest import Manifest
 
-MANIFEST_DIR = os.path.join(
-    os.path.dirname(os.path.realpath(__file__)),
-    '../../manifests'
-)
-
 
 def test_manifest_generator():
     """
@@ -15,10 +9,16 @@ def test_manifest_generator():
     Loops through the manifests directory and tests that
     each file can successfully be loaded and validated.
     """
-    for fobj in os.listdir(MANIFEST_DIR):
-        path = os.path.join(os.path.abspath(MANIFEST_DIR), fobj)
-
-        yield validate_manifests, path
+    import os.path
+    import glob
+    manifests = os.path.join(
+        os.path.dirname(os.path.realpath(__file__)),
+        '../../manifests'
+    )
+    for manifest_path in glob.glob(manifests + '/*.yml'):
+        yield validate_manifests, manifest_path
+    for manifest_path in glob.glob(manifests + '/*.json'):
+        yield validate_manifests, manifest_path
 
 def validate_manifests(path):
     """
@@ -29,7 +29,7 @@ def validate_manifests(path):
     and checks that all the data values have successfully been created.
""" - manifest = Manifest(path) + manifest = Manifest(path=path) assert_true(manifest.data) assert_true(manifest.data['provider']) diff --git a/tests/unit/releases_tests.py b/tests/unit/releases_tests.py new file mode 100644 index 0000000..df7d588 --- /dev/null +++ b/tests/unit/releases_tests.py @@ -0,0 +1,39 @@ +from nose.tools import raises +from bootstrapvz.common import releases + + +def test_gt(): + assert releases.wheezy > releases.squeeze + + +def test_lt(): + assert releases.wheezy < releases.stretch + + +def test_eq(): + assert releases.wheezy == releases.wheezy + + +def test_neq(): + assert releases.wheezy != releases.jessie + + +def test_identity(): + assert releases.wheezy is releases.wheezy + + +def test_not_identity(): + assert releases.wheezy is not releases.stable + assert releases.stable is not releases.jessie + + +def test_alias(): + assert releases.oldstable == releases.wheezy + assert releases.stable == releases.jessie + assert releases.testing == releases.stretch + assert releases.unstable == releases.sid + + +@raises(releases.UnknownReleaseException) +def test_bogus_releasename(): + releases.get_release('nemo') diff --git a/tests/unit/sectors_tests.py b/tests/unit/sectors_tests.py new file mode 100644 index 0000000..25ebdc0 --- /dev/null +++ b/tests/unit/sectors_tests.py @@ -0,0 +1,127 @@ +from nose.tools import eq_ +from nose.tools import raises +from bootstrapvz.common.sectors import Sectors +from bootstrapvz.common.bytes import Bytes +from bootstrapvz.common.exceptions import UnitError + +std_secsz = Bytes(512) + + +def test_init_with_int(): + secsize = 4096 + eq_(Sectors('1MiB', secsize), Sectors(256, secsize)) + + +def test_lt(): + assert Sectors('1MiB', std_secsz) < Sectors('2MiB', std_secsz) + + +def test_le(): + assert Sectors('1MiB', std_secsz) <= Sectors('2MiB', std_secsz) + assert Sectors('1MiB', std_secsz) <= Sectors('1MiB', std_secsz) + + +def test_eq(): + eq_(Sectors('1MiB', std_secsz), Sectors('1MiB', std_secsz)) + + +def test_neq(): + assert Sectors('15MiB', std_secsz) != Sectors('1MiB', std_secsz) + + +def test_gt(): + assert Sectors('2MiB', std_secsz) > Sectors('1MiB', std_secsz) + + +def test_ge(): + assert Sectors('2MiB', std_secsz) >= Sectors('1MiB', std_secsz) + assert Sectors('2MiB', std_secsz) >= Sectors('2MiB', std_secsz) + + +def test_eq_unit(): + eq_(Sectors('1024MiB', std_secsz), Sectors('1GiB', std_secsz)) + + +def test_add(): + eq_(Sectors('2GiB', std_secsz), Sectors('1GiB', std_secsz) + Sectors('1GiB', std_secsz)) + + +@raises(UnitError) +def test_add_with_diff_secsize(): + Sectors('1GiB', Bytes(512)) + Sectors('1GiB', Bytes(4096)) + + +def test_iadd(): + s = Sectors('1GiB', std_secsz) + s += Sectors('1GiB', std_secsz) + eq_(Sectors('2GiB', std_secsz), s) + + +def test_sub(): + eq_(Sectors('1GiB', std_secsz), Sectors('2GiB', std_secsz) - Sectors('1GiB', std_secsz)) + + +def test_sub_int(): + secsize = Bytes('4KiB') + eq_(Sectors('1MiB', secsize), Sectors('1028KiB', secsize) - 1) + + +def test_isub(): + s = Sectors('2GiB', std_secsz) + s -= Sectors('1GiB', std_secsz) + eq_(Sectors('1GiB', std_secsz), s) + + +def test_mul(): + eq_(Sectors('2GiB', std_secsz), Sectors('1GiB', std_secsz) * 2) + + +@raises(UnitError) +def test_mul_bytes(): + Sectors('1GiB', std_secsz) * Sectors('1GiB', std_secsz) + + +def test_imul(): + s = Sectors('1GiB', std_secsz) + s *= 2 + eq_(Sectors('2GiB', std_secsz), s) + + +def test_div(): + eq_(Sectors('1GiB', std_secsz), Sectors('2GiB', std_secsz) / 2) + + +def test_div_bytes(): + eq_(2, Sectors('2GiB', std_secsz) 
+    eq_(2, Sectors('2GiB', std_secsz) / Sectors('1GiB', std_secsz))
+
+
+def test_idiv():
+    s = Sectors('2GiB', std_secsz)
+    s /= 2
+    eq_(Sectors('1GiB', std_secsz), s)
+
+
+def test_mod():
+    eq_(Sectors('256MiB', std_secsz), Sectors('1GiB', std_secsz) % Sectors('768MiB', std_secsz))
+
+
+@raises(UnitError)
+def test_mod_int():
+    Sectors('1GiB', std_secsz) % 768
+
+
+def test_imod():
+    s = Sectors('1GiB', std_secsz)
+    s %= Sectors('768MiB', std_secsz)
+    eq_(Sectors('256MiB', std_secsz), s)
+
+
+@raises(UnitError)
+def test_imod_int():
+    s = Sectors('1GiB', std_secsz)
+    s %= 5
+
+
+def test_convert_int():
+    secsize = 512
+    eq_(pow(1024, 3) / secsize, int(Sectors('1GiB', secsize)))
diff --git a/tests/integration/subprocess.sh b/tests/unit/subprocess.sh
similarity index 100%
rename from tests/integration/subprocess.sh
rename to tests/unit/subprocess.sh
diff --git a/tests/integration/tools_tests.py b/tests/unit/tools_tests.py
similarity index 100%
rename from tests/integration/tools_tests.py
rename to tests/unit/tools_tests.py
diff --git a/tox.ini b/tox.ini
index b4a76b4..d0a1ba1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,16 +1,33 @@
+[tox]
+envlist = flake8, unit
+
 [flake8]
 ignore = E101,E221,E241,E501,W191
 max-line-length = 110
 
-[tox]
-envlist = flake8, integration
 
 [testenv:flake8]
 deps = flake8
-commands = flake8 bootstrapvz/ --exclude=minify_json.py
+commands = flake8 bootstrapvz/ --exclude=minify_json.py {posargs}
+
+[testenv:unit]
+deps =
+    nose
+    nose-cov
+commands = nosetests --with-coverage --cover-package=bootstrapvz --cover-inclusive --verbose {posargs:tests/unit}
 
 [testenv:integration]
 deps =
     nose
     nose-cov
-commands = nosetests -v tests/integration --with-coverage --cover-package=bootstrapvz --cover-inclusive
+    nose-htmloutput
+    Pyro4 >= 4.30
+    pyvbox >= 0.2.0
+commands = nosetests --with-coverage --cover-package=bootstrapvz --cover-inclusive --with-html --html-file=integration.html --verbose {posargs:tests/integration}
+
+[testenv:docs]
+changedir = docs
+deps =
+    sphinx
+commands =
+    sphinx-build -W -b html -d _build/html/doctrees . _build/html