Mirror of https://github.com/kevingruesser/bootstrap-vz.git, synced 2025-08-24 07:26:29 +00:00

Commit c53e7bd467: pull latest updates and fix merge for OpenNebula modifications

72 changed files with 1225 additions and 409 deletions
@@ -1,12 +0,0 @@
-# Coding standards #
-
-* Specify the full path when invoking a command.
-* Use long options whenever possible, this makes the commands invoked a lot easier to understand.
-* Use tabs for indentation and spaces for alignment.
-* Max line length is 110 chars.
-* Multiple assignments may be aligned.
-* Follow PEP8 with the exception of the following rules
-  * E101: Indenting with tabs and aligning with spaces
-  * E221: Alignment of assignments
-  * E241: Alignment of assignments
-  * E501: The line length is 110 characters not 80
-  * W191: We indent with tabs not spaces
README.md (41 lines changed)
@@ -1,31 +1,20 @@
 bootstrap-vz
 ===========================================

-bootstrap-vz is a fully automated bootstrapping tool for Debian.
-It creates images for various virtualized platforms (at the moment: kvm, virtualbox, ec2).
-The plugin architecture allows for heavy modification of standard behavior
-(e.g. create a vagrant box, apply puppet manifests, run custom shell commands).
-
-At no time is the resulting image booted, meaning there are no latent logfiles
-or bash_history files.
-
-The bootstrapper runs on a single json manifest file which contains all configurable
-parameters. This allows you to recreate the image whenever you like so you can create
-an updated version of an existing image or create the same image in multiple EC2 regions.
-
-Dependencies
-------------
-You will need to run debian wheezy with **python 2.7** and **debootstrap** installed.
-Other dependencies include:
-* qemu-utils
-* parted
-* grub2
-* euca2ools
-* xfsprogs (If you want to use XFS as a filesystem)
-
-Also the following python libraries are required:
-* **boto** ([version 2.14.0 or higher](https://github.com/boto/boto))
-* **jsonschema** ([version 2.0.0](https://pypi.python.org/pypi/jsonschema), only available through pip)
-* **termcolor**
-* **fysom**
-
-Bootstrapping instance store AMIs requires **euca2ools** to be installed.
+bootstrap-vz is a bootstrapping framework for Debian.
+It is specifically targeted at bootstrapping systems for virtualized environments.
+bootstrap-vz runs without any user intervention and generates ready-to-boot images for
+[a number of virtualization platforms](http://andsens.github.io/bootstrap-vz/providers.html).
+Its aim is to provide a reproducible bootstrapping process using [manifests](http://andsens.github.io/bootstrap-vz/manifest.html) as well as supporting a high degree of customizability through plugins.
+
+bootstrap-vz was coded from scratch in python once the bash script architecture that was used in the
+[build-debian-cloud](https://github.com/andsens/build-debian-cloud) bootstrapper reached its
+limits.
+
+Documentation
+-------------
+The documentation for bootstrap-vz is available
+at [andsens.github.io/bootstrap-vz](http://andsens.github.io/bootstrap-vz).
+There, you can discover [what the dependencies](http://andsens.github.io/bootstrap-vz/#dependencies)
+for a specific cloud provider are, [see a list of available plugins](http://andsens.github.io/bootstrap-vz/plugins.html)
+and learn [how you create a manifest](http://andsens.github.io/bootstrap-vz/manifest.html).
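The manifest mentioned above is the single source of configuration for a bootstrapping run. As an illustrative sketch (not part of this commit), the code throughout the diffs below reads roughly this shape of data; only the keys actually accessed in this commit are shown, and the concrete values are assumptions:

```python
# Hypothetical manifest data, inferred from the accesses visible in this commit
# (bootstrapper['workspace'], system['release'], system['bootloader'],
# packages.get('mirror'), volume['backing'], volume['partitions']['type']).
manifest_data = {
    'bootstrapper': {'workspace': '/target'},
    'system': {'release': 'wheezy', 'bootloader': 'grub'},
    'packages': {'mirror': 'http://http.debian.net/debian'},
    'volume': {'backing': 'raw',
               'partitions': {'type': 'gpt',
                              'root': {'size': '4GiB', 'filesystem': 'ext4'}}},
}
```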
@@ -5,6 +5,13 @@ from main import main


 def validate_manifest(data, validator, error):
+    """Validates the manifest using the base manifest
+
+    Args:
+        data (dict): The data of the manifest
+        validator (function): The function that validates the manifest given the data and a path
+        error (function): The function that raises an error when the validation fails
+    """
     import os.path
     schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json'))
     validator(data, schema_path)
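The `validator` callback documented above receives the manifest data and a schema path. A minimal sketch of such a callback, assuming the jsonschema library from the old README's dependency list (this is not the actual bootstrap-vz implementation):

```python
import json
import jsonschema

def validator(data, schema_path):
    # Load the JSON schema from disk and validate the manifest data against it;
    # jsonschema.validate() raises a ValidationError on failure.
    with open(schema_path) as schema_file:
        schema = json.load(schema_file)
    jsonschema.validate(data, schema)
```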
@@ -1,22 +1,46 @@


 class BootstrapInformation(object):
+    """The BootstrapInformation class holds all information about the bootstrapping process.
+    The nature of the attributes of this class is rather diverse.
+    Tasks may set their own attributes on this class for later retrieval by another task.
+    Information that becomes invalid (e.g. a path to a file that has been deleted) must be removed.
+    """
     def __init__(self, manifest=None, debug=False):
+        """Instantiates a new bootstrap info object.
+
+        Args:
+            manifest (Manifest): The manifest
+            debug (bool): Whether debugging is turned on
+        """
+        # Set the manifest attribute.
         self.manifest = manifest
         self.debug = debug

+        # Create a run_id. This id may be used to uniquely identify the current bootstrapping process
         import random
         self.run_id = '{id:08x}'.format(id=random.randrange(16 ** 8))

+        # Define the path to our workspace
         import os.path
         self.workspace = os.path.join(manifest.bootstrapper['workspace'], self.run_id)

+        # Load all the volume information
         from fs import load_volume
         self.volume = load_volume(self.manifest.volume, manifest.system['bootloader'])

+        # The default apt mirror
         self.apt_mirror = self.manifest.packages.get('mirror', 'http://http.debian.net/debian')

+        # Normalize the release codenames so that tasks may query for release codenames rather than
+        # 'stable', 'unstable' etc. This is useful when handling cases that are specific to a release.
+        release_codenames_path = os.path.join(os.path.dirname(__file__), 'release-codenames.json')
+        from common.tools import config_get
+        self.release_codename = config_get(release_codenames_path, [self.manifest.system['release']])

         class DictClass(dict):
+            """Tiny extension of dict to allow setting and getting keys via attributes
+            """
             def __getattr__(self, name):
                 return self[name]
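DictClass, introduced above (its `__setattr__` half appears at the top of the next hunk), simply mirrors dict keys as attributes. A self-contained demonstration of both halves together:

```python
class DictClass(dict):
    """Tiny extension of dict to allow setting and getting keys via attributes"""
    def __getattr__(self, name):
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value

d = DictClass({'release': 'wheezy'})
print(d.release)         # 'wheezy' -- attribute reads map to dict lookups
d.bootloader = 'grub'    # attribute writes map to dict assignment
print(d['bootloader'])   # 'grub'
```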
@@ -24,18 +48,29 @@ class BootstrapInformation(object):
                 self[name] = value

         def set_manifest_vars(obj, data):
+            """Runs through the manifest and creates DictClasses for every key
+
+            Args:
+                obj (dict): dictionary to set the values on
+                data (dict): dictionary of values to set on the obj
+            """
             for key, value in data.iteritems():
                 if isinstance(value, dict):
                     obj[key] = DictClass()
                     set_manifest_vars(obj[key], value)
                     continue
+                # Lists are not supported
                 if not isinstance(value, list):
                     obj[key] = value

+        # manifest_vars is a dictionary of all the manifest values,
+        # with it users can cross-reference values in the manifest, so that they do not need to be written twice
         self.manifest_vars = {}
         self.manifest_vars['apt_mirror'] = self.apt_mirror
         set_manifest_vars(self.manifest_vars, self.manifest.data)

+        # Populate the manifest_vars with datetime information
+        # and map the datetime variables directly to the dictionary
         from datetime import datetime
         now = datetime.now()
         time_vars = ['%a', '%A', '%b', '%B', '%c', '%d', '%f', '%H',
@@ -44,13 +79,23 @@ class BootstrapInformation(object):
         for key in time_vars:
             self.manifest_vars[key] = now.strftime(key)

+        # Keep a list of apt sources,
+        # so that tasks may add to that list without having to fiddle with apt source list files.
         from pkg.sourceslist import SourceLists
         self.source_lists = SourceLists(self.manifest_vars)
+        # Keep a list of packages that should be installed, tasks can add and remove things from this list
         from pkg.packagelist import PackageList
         self.packages = PackageList(self.manifest_vars, self.source_lists)

+        # These sets should rarely be used and specify which packages the debootstrap invocation
+        # should be called with.
         self.include_packages = set()
         self.exclude_packages = set()

-        self.host_dependencies = set()
+        # Dictionary to specify which commands are required on the host.
+        # The keys are commands, while the values are either package names or urls
+        # that hint at how a command may be made available.
+        self.host_dependencies = {}

+        # Lists of startup scripts that should be installed and disabled
         self.initd = {'install': {}, 'disable': []}
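The manifest_vars dictionary built above powers the cross-referencing the comment describes: strings in the manifest can embed `{...}` placeholders that are expanded with `str.format(**manifest_vars)`. The template below is an invented example; note that CPython's str.format accepts the non-identifier `%`-style keys used for the datetime values:

```python
manifest_vars = {'apt_mirror': 'http://http.debian.net/debian',
                 '%Y': '2014', '%m': '01', '%d': '15'}
template = 'debian-wheezy-{%Y}{%m}{%d}'
print(template.format(**manifest_vars))  # debian-wheezy-20140115
```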
@@ -1,10 +1,19 @@


 def load_volume(data, bootloader):
+    """Instantiates a volume that corresponds to the data in the manifest
+
+    Args:
+        data (dict): The 'volume' section from the manifest
+        bootloader (str): Name of the bootloader the system will boot with
+
+    Returns:
+        Volume. The volume that represents all information pertaining to the volume we bootstrap on
+    """
     from common.fs.loopbackvolume import LoopbackVolume
     from providers.ec2.ebsvolume import EBSVolume
     from common.fs.virtualdiskimage import VirtualDiskImage
     from common.fs.virtualmachinedisk import VirtualMachineDisk
+    # Create a mapping between valid partition maps in the manifest and their corresponding classes
     from partitionmaps.gpt import GPTPartitionMap
     from partitionmaps.msdos import MSDOSPartitionMap
     from partitionmaps.none import NoPartitions
@@ -12,11 +21,14 @@ def load_volume(data, bootloader):
                'gpt': GPTPartitionMap,
                'msdos': MSDOSPartitionMap,
                }
+    # Instantiate the partition map
     partition_map = partition_maps.get(data['partitions']['type'])(data['partitions'], bootloader)
+    # Create a mapping between valid volume backings in the manifest and their corresponding classes
     volume_backings = {'raw': LoopbackVolume,
                        's3': LoopbackVolume,
                        'vdi': VirtualDiskImage,
                        'vmdk': VirtualMachineDisk,
                        'ebs': EBSVolume
                        }
+    # Create the volume with the partition map as an argument
     return volume_backings.get(data['backing'])(partition_map)
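load_volume above resolves manifest strings to classes through plain dicts rather than if/elif chains. A self-contained sketch of the pattern with a stand-in class (the real classes live in bootstrap-vz's own packages):

```python
class GPTPartitionMap(object):
    def __init__(self, data, bootloader):
        self.data, self.bootloader = data, bootloader

partition_maps = {'gpt': GPTPartitionMap}
data = {'partitions': {'type': 'gpt', 'root': {'size': '4GiB'}}}
# dict.get() returns the class, which is then called to instantiate it.
# An unknown type would return None and fail with a TypeError on the call,
# which is why the manifest is schema-validated before this point.
partition_map = partition_maps.get(data['partitions']['type'])(data['partitions'], 'grub')
print(type(partition_map).__name__)  # GPTPartitionMap
```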
@@ -1,8 +1,12 @@


 class VolumeError(Exception):
+    """Raised when an error occurs while interacting with the volume
+    """
     pass


 class PartitionError(Exception):
+    """Raised when an error occurs while interacting with the partitions on the volume
+    """
     pass
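The classes in the hunks below (AbstractPartitionMap, AbstractPartition, Volume) all declare `events` tables and drive them through FSMProxy, which wraps the fysom library from the old README's dependency list. A minimal sketch of the underlying fysom semantics, assuming its documented API:

```python
from fysom import Fysom

# The same event table AbstractPartitionMap declares below.
fsm = Fysom({'initial': 'nonexistent',
             'events': [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'},
                        {'name': 'map', 'src': 'unmapped', 'dst': 'mapped'},
                        {'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'}]})
fsm.create()                # nonexistent -> unmapped
print(fsm.current)          # 'unmapped'
print(fsm.can('map'))       # True
print(fsm.cannot('unmap'))  # True: 'unmap' is only legal from 'mapped'
fsm.map()
print(fsm.current)          # 'mapped'
```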
@@ -6,25 +6,50 @@ from ..exceptions import PartitionError


 class AbstractPartitionMap(FSMProxy):
+    """Abstract representation of a partition map
+    This class is a finite state machine and represents the state of the real partition map
+    """

     __metaclass__ = ABCMeta

+    # States the partition map can be in
     events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'},
               {'name': 'map', 'src': 'unmapped', 'dst': 'mapped'},
               {'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'},
               ]

     def __init__(self, bootloader):
+        """
+        Args:
+            bootloader (str): Name of the bootloader we will use for bootstrapping
+        """
+        # Create the configuration for the state machine
         cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': {}}
         super(AbstractPartitionMap, self).__init__(cfg)

     def is_blocking(self):
+        """Returns whether the partition map is blocking volume detach operations
+
+        Returns:
+            bool.
+        """
         return self.fsm.current == 'mapped'

     def get_total_size(self):
+        """Returns the total size the partitions occupy
+
+        Returns:
+            Bytes. The size of all the partitions
+        """
+        # We just need the endpoint of the last partition
         return self.partitions[-1].get_end()

     def create(self, volume):
+        """Creates the partition map
+
+        Args:
+            volume (Volume): The volume to create the partition map on
+        """
         self.fsm.create(volume=volume)

     @abstractmethod
@@ -32,19 +57,30 @@ class AbstractPartitionMap(FSMProxy):
         pass

     def map(self, volume):
+        """Maps the partition map to device nodes
+
+        Args:
+            volume (Volume): The volume the partition map resides on
+        """
         self.fsm.map(volume=volume)

     def _before_map(self, event):
+        """
+        Raises:
+            PartitionError
+        """
         volume = event.volume
         try:
-            mappings = log_check_call(['/sbin/kpartx', '-l', volume.device_path])
+            # Ask kpartx how the partitions will be mapped before actually attaching them.
+            mappings = log_check_call(['kpartx', '-l', volume.device_path])
             import re
             regexp = re.compile('^(?P<name>.+[^\d](?P<p_idx>\d+)) : '
                                 '(?P<start_blk>\d) (?P<num_blks>\d+) '
                                 '{device_path} (?P<blk_offset>\d+)$'
                                 .format(device_path=volume.device_path))
-            log_check_call(['/sbin/kpartx', '-a', volume.device_path])
+            log_check_call(['kpartx', '-a', volume.device_path])
             import os.path
+            # Run through the kpartx output and map the paths to the partitions
             for mapping in mappings:
                 match = regexp.match(mapping)
                 if match is None:
@@ -53,26 +89,40 @@ class AbstractPartitionMap(FSMProxy):
             p_idx = int(match.group('p_idx')) - 1
             self.partitions[p_idx].map(partition_path)

+            # Check if any partition was not mapped
             for idx, partition in enumerate(self.partitions):
                 if partition.fsm.current not in ['mapped', 'formatted']:
                     raise PartitionError('kpartx did not map partition #{idx}'.format(idx=idx + 1))

         except PartitionError as e:
+            # Revert any mapping and reraise the error
             for partition in self.partitions:
                 if not partition.fsm.can('unmap'):
                     partition.unmap()
-            log_check_call(['/sbin/kpartx', '-d', volume.device_path])
+            log_check_call(['kpartx', '-d', volume.device_path])
             raise e

     def unmap(self, volume):
+        """Unmaps the partition
+
+        Args:
+            volume (Volume): The volume to unmap the partition map from
+        """
         self.fsm.unmap(volume=volume)

     def _before_unmap(self, event):
+        """
+        Raises:
+            PartitionError
+        """
         volume = event.volume
+        # Run through all partitions before unmapping and make sure they can all be unmapped
         for partition in self.partitions:
             if partition.fsm.cannot('unmap'):
                 msg = 'The partition {partition} prevents the unmap procedure'.format(partition=partition)
                 raise PartitionError(msg)
-        log_check_call(['/sbin/kpartx', '-d', volume.device_path])
+        # Actually unmap the partitions
+        log_check_call(['kpartx', '-d', volume.device_path])
+        # Call unmap on all partitions
        for partition in self.partitions:
             partition.unmap()
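A quick illustration of the `_before_map` regexp above applied to one line of `kpartx -l` output. The sample line is an assumption based on kpartx's usual `name : start size device offset` format, not captured from a real run:

```python
import re

device_path = '/dev/loop0'
regexp = re.compile('^(?P<name>.+[^\\d](?P<p_idx>\\d+)) : '
                    '(?P<start_blk>\\d) (?P<num_blks>\\d+) '
                    '{device_path} (?P<blk_offset>\\d+)$'
                    .format(device_path=device_path))

sample = 'loop0p1 : 0 204800 /dev/loop0 2048'   # assumed kpartx -l output line
match = regexp.match(sample)
print(match.group('name'))    # 'loop0p1' -> becomes /dev/mapper/loop0p1 once mapped
print(match.group('p_idx'))   # '1' -> kpartx is 1-indexed, hence the `- 1` in the code
```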
@@ -5,34 +5,53 @@ from common.tools import log_check_call


 class GPTPartitionMap(AbstractPartitionMap):
+    """Represents a GPT partition map
+    """

     def __init__(self, data, bootloader):
+        """
+        Args:
+            data (dict): volume.partitions part of the manifest
+            bootloader (str): Name of the bootloader we will use for bootstrapping
+        """
         from common.bytes import Bytes
+        # List of partitions
         self.partitions = []

+        # Returns the last partition unless there is none
         def last_partition():
             return self.partitions[-1] if len(self.partitions) > 0 else None

+        # GPT offset
         gpt_offset = Bytes('17KiB')

+        # If we are using the grub bootloader we need to create an unformatted partition
+        # at the beginning of the map. Its size is 1007KiB, which we will steal from the
+        # next partition.
         if bootloader == 'grub':
             from ..partitions.unformatted import UnformattedPartition
             self.grub_boot = UnformattedPartition(Bytes('1007KiB'), last_partition())
             self.grub_boot.offset = gpt_offset
+            # Mark the partition as a bios_grub partition
             self.grub_boot.flags.append('bios_grub')
             self.partitions.append(self.grub_boot)

+        # The boot and swap partitions are optional
         if 'boot' in data:
-            self.boot = GPTPartition(Bytes(data['boot']['size']), data['boot']['filesystem'],
+            self.boot = GPTPartition(Bytes(data['boot']['size']),
+                                     data['boot']['filesystem'], data['boot'].get('format_command', None),
                                      'boot', last_partition())
             self.partitions.append(self.boot)
         if 'swap' in data:
             self.swap = GPTSwapPartition(Bytes(data['swap']['size']), last_partition())
             self.partitions.append(self.swap)
-        self.root = GPTPartition(Bytes(data['root']['size']), data['root']['filesystem'],
+        self.root = GPTPartition(Bytes(data['root']['size']),
+                                 data['root']['filesystem'], data['root'].get('format_command', None),
                                  'root', last_partition())
         self.partitions.append(self.root)

+        # Depending on whether we have a grub boot partition
+        # we will need to set the offset accordingly.
         if hasattr(self, 'grub_boot'):
             self.partitions[1].size -= gpt_offset
             self.partitions[1].size -= self.grub_boot.size
@@ -43,8 +62,13 @@ class GPTPartitionMap(AbstractPartitionMap):
         super(GPTPartitionMap, self).__init__(bootloader)

     def _before_create(self, event):
+        """Creates the partition map
+        """
         volume = event.volume
-        log_check_call(['/sbin/parted', '--script', '--align', 'none', volume.device_path,
+        # Disk alignment still plays a role in virtualized environments,
+        # but I honestly have no clue as to what best practice is here, so we choose 'none'
+        log_check_call(['parted', '--script', '--align', 'none', volume.device_path,
                         '--', 'mklabel', 'gpt'])
+        # Create the partitions
         for partition in self.partitions:
             partition.create(volume)
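The numbers in the GPT hunk above are not arbitrary: the 17KiB offset covers the protective MBR plus the GPT header and partition entries, and together with the 1007KiB bios_grub partition it sums to exactly 1MiB, so the first real partition starts 1MiB-aligned. A quick check in plain integers (the Bytes class itself is internal to bootstrap-vz):

```python
KiB = 1024
gpt_offset = 17 * KiB    # protective MBR (512B) + GPT header (512B) + partition entries (16KiB)
grub_boot = 1007 * KiB   # size of the unformatted bios_grub partition
print(gpt_offset + grub_boot == 1024 * KiB)  # True: the next partition starts at exactly 1MiB
# The constructor then subtracts both amounts from the following partition
# (`size -= gpt_offset` and `size -= self.grub_boot.size`), keeping the total
# volume size equal to what the manifest requested.
```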
@@ -5,25 +5,43 @@ from common.tools import log_check_call


 class MSDOSPartitionMap(AbstractPartitionMap):
+    """Represents a MS-DOS partition map
+    Sometimes also called MBR (but that confuses the hell out of me, so ms-dos it is)
+    """

     def __init__(self, data, bootloader):
+        """
+        Args:
+            data (dict): volume.partitions part of the manifest
+            bootloader (str): Name of the bootloader we will use for bootstrapping
+        """
         from common.bytes import Bytes
+        # List of partitions
         self.partitions = []

+        # Returns the last partition unless there is none
         def last_partition():
             return self.partitions[-1] if len(self.partitions) > 0 else None

+        # The boot and swap partitions are optional
         if 'boot' in data:
-            self.boot = MSDOSPartition(Bytes(data['boot']['size']), data['boot']['filesystem'], None)
+            self.boot = MSDOSPartition(Bytes(data['boot']['size']),
+                                       data['boot']['filesystem'], data['boot'].get('format_command', None),
+                                       last_partition())
             self.partitions.append(self.boot)
         if 'swap' in data:
             self.swap = MSDOSSwapPartition(Bytes(data['swap']['size']), last_partition())
             self.partitions.append(self.swap)
-        self.root = MSDOSPartition(Bytes(data['root']['size']), data['root']['filesystem'], last_partition())
+        self.root = MSDOSPartition(Bytes(data['root']['size']),
+                                   data['root']['filesystem'], data['root'].get('format_command', None),
+                                   last_partition())
         self.partitions.append(self.root)

+        # Mark boot as the boot partition, or root, if boot does not exist
         getattr(self, 'boot', self.root).flags.append('boot')

+        # If we are using the grub bootloader, we will need to create a 2 MB offset at the beginning
+        # of the partitionmap and steal it from the first partition
         if bootloader == 'grub':
             self.partitions[0].offset = Bytes('2MiB')
             self.partitions[0].size -= self.partitions[0].offset
@@ -32,7 +50,10 @@ class MSDOSPartitionMap(AbstractPartitionMap):

     def _before_create(self, event):
         volume = event.volume
-        log_check_call(['/sbin/parted', '--script', '--align', 'none', volume.device_path,
+        # Disk alignment still plays a role in virtualized environments,
+        # but I honestly have no clue as to what best practice is here, so we choose 'none'
+        log_check_call(['parted', '--script', '--align', 'none', volume.device_path,
                         '--', 'mklabel', 'msdos'])
+        # Create the partitions
         for partition in self.partitions:
             partition.create(volume)
@@ -2,14 +2,35 @@ from ..partitions.single import SinglePartition


 class NoPartitions(object):
+    """Represents a virtual 'NoPartitions' partitionmap.
+    This virtual partition map exists because it is easier for tasks to
+    simply always deal with partition maps and then let the base abstract that away.
+    """

     def __init__(self, data, bootloader):
+        """
+        Args:
+            data (dict): volume.partitions part of the manifest
+            bootloader (str): Name of the bootloader we will use for bootstrapping
+        """
         from common.bytes import Bytes
-        self.root = SinglePartition(Bytes(data['root']['size']), data['root']['filesystem'])
+        # In the NoPartitions partitions map we only have a single 'partition'
+        self.root = SinglePartition(Bytes(data['root']['size']),
+                                    data['root']['filesystem'], data['root'].get('format_command', None))
         self.partitions = [self.root]

     def is_blocking(self):
+        """Returns whether the partition map is blocking volume detach operations
+
+        Returns:
+            bool.
+        """
         return self.root.fsm.current == 'mounted'

     def get_total_size(self):
+        """Returns the total size the partitions occupy
+
+        Returns:
+            Bytes. The size of all the partitions
+        """
         return self.root.get_end()
@@ -6,9 +6,13 @@ from common.fsm_proxy import FSMProxy


 class AbstractPartition(FSMProxy):
+    """Abstract representation of a partition
+    This class is a finite state machine and represents the state of the real partition
+    """

     __metaclass__ = ABCMeta

+    # Our states
     events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'created'},
               {'name': 'format', 'src': 'created', 'dst': 'formatted'},
               {'name': 'mount', 'src': 'formatted', 'dst': 'mounted'},
@@ -16,37 +20,68 @@ class AbstractPartition(FSMProxy):
               ]

     class Mount(object):
+        """Represents a mount into the partition
+        """
         def __init__(self, source, destination, opts):
+            """
+            Args:
+                source (str,AbstractPartition): The path from where we mount or a partition
+                destination (str): The path of the mountpoint
+                opts (list): List of options to pass to the mount command
+            """
             self.source = source
             self.destination = destination
             self.opts = opts

         def mount(self, prefix):
+            """Performs the mount operation or forwards it to another partition
+            Args:
+                prefix (str): Path prefix of the mountpoint
+            """
             mount_dir = os.path.join(prefix, self.destination)
+            # If the source is another partition, we tell that partition to mount itself
             if isinstance(self.source, AbstractPartition):
                 self.source.mount(destination=mount_dir)
             else:
-                log_check_call(['/bin/mount'] + self.opts + [self.source, mount_dir])
+                log_check_call(['mount'] + self.opts + [self.source, mount_dir])
             self.mount_dir = mount_dir

         def unmount(self):
+            """Performs the unmount operation or asks the partition to unmount itself
+            """
+            # If it's a partition, it can unmount itself
            if isinstance(self.source, AbstractPartition):
                 self.source.unmount()
             else:
-                log_check_call(['/bin/umount', self.mount_dir])
+                log_check_call(['umount', self.mount_dir])
             del self.mount_dir

-    def __init__(self, size, filesystem):
-        self.size = size
-        self.filesystem = filesystem
-        self.device_path = None
-        self.mounts = {}
+    def __init__(self, size, filesystem, format_command):
+        """
+        Args:
+            size (Bytes): Size of the partition
+            filesystem (str): Filesystem the partition should be formatted with
+            format_command (list): Optional format command, valid variables are fs, device_path and size
+        """
+        self.size = size
+        self.filesystem = filesystem
+        self.format_command = format_command
+        # Path to the partition
+        self.device_path = None
+        # Dictionary with mount points as keys and Mount objects as values
+        self.mounts = {}

+        # Create the configuration for our state machine
         cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': {}}
         super(AbstractPartition, self).__init__(cfg)

     def get_uuid(self):
-        [uuid] = log_check_call(['/sbin/blkid', '-s', 'UUID', '-o', 'value', self.device_path])
+        """Gets the UUID of the partition
+
+        Returns:
+            str. The UUID of the partition
+        """
+        [uuid] = log_check_call(['blkid', '-s', 'UUID', '-o', 'value', self.device_path])
         return uuid

     @abstractmethod
@@ -54,33 +89,77 @@ class AbstractPartition(FSMProxy):
         pass

     def get_end(self):
+        """Gets the end of the partition
+
+        Returns:
+            Bytes. The end of the partition
+        """
         return self.get_start() + self.size

     def _before_format(self, e):
-        mkfs = '/sbin/mkfs.{fs}'.format(fs=self.filesystem)
-        log_check_call([mkfs, self.device_path])
+        """Formats the partition
+        """
+        # If there is no explicit format_command defined we simply call mkfs.fstype
+        if self.format_command is None:
+            format_command = ['mkfs.{fs}', '{device_path}']
+        else:
+            format_command = self.format_command
+        variables = {'fs': self.filesystem,
+                     'device_path': self.device_path,
+                     'size': self.size,
+                     }
+        command = map(lambda part: part.format(**variables), format_command)
+        # Format the partition
+        log_check_call(command)

     def _before_mount(self, e):
-        log_check_call(['/bin/mount', '--types', self.filesystem, self.device_path, e.destination])
+        """Mount the partition
+        """
+        log_check_call(['mount', '--types', self.filesystem, self.device_path, e.destination])
         self.mount_dir = e.destination

     def _after_mount(self, e):
+        """Mount any mounts associated with this partition
+        """
+        # Make sure we mount in ascending order of mountpoint path length
+        # This ensures that we don't mount /dev/pts before we mount /dev
         for destination in sorted(self.mounts.iterkeys(), key=len):
             self.mounts[destination].mount(self.mount_dir)

     def _before_unmount(self, e):
+        """Unmount any mounts associated with this partition
+        """
+        # Unmount the mounts in descending order of mountpoint path length
+        # You cannot unmount /dev before you have unmounted /dev/pts
         for destination in sorted(self.mounts.iterkeys(), key=len, reverse=True):
             self.mounts[destination].unmount()
-        log_check_call(['/bin/umount', self.mount_dir])
+        log_check_call(['umount', self.mount_dir])
         del self.mount_dir

     def add_mount(self, source, destination, opts=[]):
+        """Associate a mount with this partition
+        Automatically mounts it
+
+        Args:
+            source (str,AbstractPartition): The source of the mount
+            destination (str): The path to the mountpoint
+            opts (list): Any options that should be passed to the mount command
+        """
+        # Create a new mount object, mount it if the partition is mounted and put it in the mounts dict
         mount = self.Mount(source, destination, opts)
         if self.fsm.current == 'mounted':
             mount.mount(self.mount_dir)
         self.mounts[destination] = mount

     def remove_mount(self, destination):
+        """Remove a mount from this partition
+        Automatically unmounts it
+
+        Args:
+            destination (str): The mountpoint path of the mount that should be removed
+        """
+        # Unmount the mount if the partition is mounted and delete it from the mounts dict
+        # If the mount is already unmounted and the source is a partition, this will raise an exception
        if self.fsm.current == 'mounted':
             self.mounts[destination].unmount()
         del self.mounts[destination]
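The new `_before_format` above treats the format command as a list of templates, each expanded with the `fs`, `device_path` and `size` variables. A sketch of the substitution (the custom command is an invented example; a list comprehension stands in for the Python 2 `map()` call):

```python
variables = {'fs': 'ext4', 'device_path': '/dev/mapper/loop0p1', 'size': '4GiB'}

default = ['mkfs.{fs}', '{device_path}']                  # used when no format_command is given
print([part.format(**variables) for part in default])
# ['mkfs.ext4', '/dev/mapper/loop0p1']

custom = ['mkfs.{fs}', '-N', '131072', '{device_path}']   # e.g. pin the inode count
print([part.format(**variables) for part in custom])
# ['mkfs.ext4', '-N', '131072', '/dev/mapper/loop0p1']
```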
@@ -2,7 +2,11 @@ from abstract import AbstractPartition


 class BasePartition(AbstractPartition):
+    """Represents a partition that is actually a partition (and not a virtual one like 'Single')
+    """

+    # Override the states of the abstract partition
+    # A real partition can be mapped and unmapped
     events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'},
               {'name': 'map', 'src': 'unmapped', 'dst': 'mapped'},
               {'name': 'format', 'src': 'mapped', 'dst': 'formatted'},
@@ -14,46 +18,88 @@ class BasePartition(AbstractPartition):
               {'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'},
               ]

-    def __init__(self, size, filesystem, previous):
+    def __init__(self, size, filesystem, format_command, previous):
+        """
+        Args:
+            size (Bytes): Size of the partition
+            filesystem (str): Filesystem the partition should be formatted with
+            format_command (list): Optional format command, valid variables are fs, device_path and size
+            previous (BasePartition): The partition that precedes this one
+        """
+        # By saving the previous partition we have
+        # a linked list that partitions can go backwards in to find the first partition.
         self.previous = previous
         from common.bytes import Bytes
+        # Initialize the offset to 0 bytes, may be changed later
         self.offset = Bytes(0)
+        # List of flags that parted should put on the partition
         self.flags = []
-        super(BasePartition, self).__init__(size, filesystem)
+        super(BasePartition, self).__init__(size, filesystem, format_command)

     def create(self, volume):
+        """Creates the partition
+
+        Args:
+            volume (Volume): The volume to create the partition on
+        """
         self.fsm.create(volume=volume)

     def get_index(self):
+        """Gets the index of this partition in the partition map
+
+        Returns:
+            int. The index of the partition in the partition map
+        """
         if self.previous is None:
+            # Partitions are 1 indexed
             return 1
         else:
+            # Recursive call to the previous partition, walking up the chain...
             return self.previous.get_index() + 1

     def get_start(self):
+        """Gets the starting byte of this partition
+
+        Returns:
+            Bytes. The starting byte of this partition
+        """
         if self.previous is None:
+            # If there is no previous partition, this partition begins at the offset
            return self.offset
         else:
+            # Get the end of the previous partition and add the offset of this partition
             return self.previous.get_end() + self.offset

     def map(self, device_path):
+        """Maps the partition to a device_path
+
+        Args:
+            device_path (str): The device path this partition should be mapped to
+        """
         self.fsm.map(device_path=device_path)

     def _before_create(self, e):
+        """Creates the partition
+        """
         from common.tools import log_check_call
+        # The create command is fairly simple, start and end are just Bytes objects coerced into strings
         create_command = ('mkpart primary {start} {end}'
                           .format(start=str(self.get_start()),
                                   end=str(self.get_end())))
-        log_check_call(['/sbin/parted', '--script', '--align', 'none', e.volume.device_path,
+        # Create the partition
+        log_check_call(['parted', '--script', '--align', 'none', e.volume.device_path,
                         '--', create_command])

+        # Set any flags on the partition
         for flag in self.flags:
-            log_check_call(['/sbin/parted', '--script', e.volume.device_path,
+            log_check_call(['parted', '--script', e.volume.device_path,
                             '--', ('set {idx} {flag} on'
                                    .format(idx=str(self.get_index()), flag=flag))])

     def _before_map(self, e):
+        # Set the device path
         self.device_path = e.device_path

     def _before_unmap(self, e):
+        # When unmapped, the device_path information becomes invalid, so we delete it
         self.device_path = None
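get_index() and get_start() above walk the `previous` linked list instead of storing absolute positions. A self-contained sketch with plain integers standing in for the Bytes objects:

```python
class Part(object):
    def __init__(self, size, offset, previous):
        self.size, self.offset, self.previous = size, offset, previous

    def get_index(self):
        # Partitions are 1-indexed; recurse up the chain
        return 1 if self.previous is None else self.previous.get_index() + 1

    def get_start(self):
        # The first partition starts at its offset; later ones after the previous end
        return self.offset if self.previous is None else self.previous.get_end() + self.offset

    def get_end(self):
        return self.get_start() + self.size

grub = Part(size=1007 * 1024, offset=17 * 1024, previous=None)   # the bios_grub partition
root = Part(size=4 * 1024 ** 3, offset=0, previous=grub)
print(root.get_index())               # 2
print(root.get_start() == 1024 ** 2)  # True: root starts at the 1MiB boundary
```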
@@ -3,16 +3,27 @@ from base import BasePartition


 class GPTPartition(BasePartition):
+    """Represents a GPT partition
+    """

-    def __init__(self, size, filesystem, name, previous):
+    def __init__(self, size, filesystem, format_command, name, previous):
+        """
+        Args:
+            size (Bytes): Size of the partition
+            filesystem (str): Filesystem the partition should be formatted with
+            format_command (list): Optional format command, valid variables are fs, device_path and size
+            name (str): The name of the partition
+            previous (BasePartition): The partition that precedes this one
+        """
         self.name = name
-        super(GPTPartition, self).__init__(size, filesystem, previous)
+        super(GPTPartition, self).__init__(size, filesystem, format_command, previous)

     def _before_create(self, e):
+        # Create the partition and then set the name of the partition afterwards
         super(GPTPartition, self)._before_create(e)
         # partition name only works for gpt, for msdos that becomes the part-type (primary, extended, logical)
         name_command = ('name {idx} {name}'
                         .format(idx=self.get_index(),
                                 name=self.name))
-        log_check_call(['/sbin/parted', '--script', e.volume.device_path,
+        log_check_call(['parted', '--script', e.volume.device_path,
                         '--', name_command])
@@ -3,9 +3,16 @@ from gpt import GPTPartition


 class GPTSwapPartition(GPTPartition):
+    """Represents a GPT swap partition
+    """

     def __init__(self, size, previous):
-        super(GPTSwapPartition, self).__init__(size, 'swap', 'swap', previous)
+        """
+        Args:
+            size (Bytes): Size of the partition
+            previous (BasePartition): The partition that precedes this one
+        """
+        super(GPTSwapPartition, self).__init__(size, 'swap', None, 'swap', previous)

     def _before_format(self, e):
-        log_check_call(['/sbin/mkswap', self.device_path])
+        log_check_call(['mkswap', self.device_path])
@@ -2,4 +2,6 @@ from base import BasePartition


 class MSDOSPartition(BasePartition):
+    """Represents an MS-DOS partition
+    """
     pass
@@ -3,9 +3,16 @@ from msdos import MSDOSPartition


 class MSDOSSwapPartition(MSDOSPartition):
+    """Represents a MS-DOS swap partition
+    """

     def __init__(self, size, previous):
-        super(MSDOSSwapPartition, self).__init__(size, 'swap', previous)
+        """
+        Args:
+            size (Bytes): Size of the partition
+            previous (BasePartition): The partition that precedes this one
+        """
+        super(MSDOSSwapPartition, self).__init__(size, 'swap', None, previous)

     def _before_format(self, e):
-        log_check_call(['/sbin/mkswap', self.device_path])
+        log_check_call(['mkswap', self.device_path])
@@ -2,7 +2,15 @@ from abstract import AbstractPartition


 class SinglePartition(AbstractPartition):
+    """Represents a single virtual partition on an unpartitioned volume
+    """

     def get_start(self):
+        """Gets the starting byte of this partition
+
+        Returns:
+            Bytes. The starting byte of this partition
+        """
         from common.bytes import Bytes
+        # On an unpartitioned volume there is no offset and no previous partition
         return Bytes(0)
@@ -2,11 +2,20 @@ from base import BasePartition


 class UnformattedPartition(BasePartition):
+    """Represents an unformatted partition
+    It cannot be mounted
+    """

+    # The states for our state machine. It can only be mapped, not mounted.
     events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'},
               {'name': 'map', 'src': 'unmapped', 'dst': 'mapped'},
               {'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'},
               ]

     def __init__(self, size, previous):
-        super(UnformattedPartition, self).__init__(size, None, previous)
+        """
+        Args:
+            size (Bytes): Size of the partition
+            previous (BasePartition): The partition that precedes this one
+        """
+        super(UnformattedPartition, self).__init__(size, None, None, previous)
@ -6,9 +6,13 @@ from partitionmaps.none import NoPartitions
|
||||||
|
|
||||||
|
|
||||||
class Volume(FSMProxy):
|
class Volume(FSMProxy):
|
||||||
|
"""Represents an abstract volume.
|
||||||
|
This class is a finite state machine and represents the state of the real volume.
|
||||||
|
"""
|
||||||
|
|
||||||
__metaclass__ = ABCMeta
|
__metaclass__ = ABCMeta
|
||||||
|
|
||||||
|
# States this volume can be in
|
||||||
events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'detached'},
|
events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'detached'},
|
||||||
{'name': 'attach', 'src': 'detached', 'dst': 'attached'},
|
{'name': 'attach', 'src': 'detached', 'dst': 'attached'},
|
||||||
{'name': 'link_dm_node', 'src': 'attached', 'dst': 'linked'},
|
{'name': 'link_dm_node', 'src': 'attached', 'dst': 'linked'},
|
||||||
|
@ -18,33 +22,76 @@ class Volume(FSMProxy):
|
||||||
]
|
]
|
||||||
|
|
||||||
def __init__(self, partition_map):
|
def __init__(self, partition_map):
|
||||||
|
"""
|
||||||
|
Args:
|
||||||
|
partition_map (PartitionMap): The partition map for the volume
|
||||||
|
"""
|
||||||
|
# Path to the volume
|
||||||
self.device_path = None
|
self.device_path = None
|
||||||
self.real_device_path = None
|
self.real_device_path = None
|
||||||
|
# The partition map
|
||||||
self.partition_map = partition_map
|
self.partition_map = partition_map
|
||||||
|
# The size of the volume as reported by the partition map
|
||||||
self.size = self.partition_map.get_total_size()
|
self.size = self.partition_map.get_total_size()
|
||||||
|
|
||||||
|
# Before detaching, check that nothing would block the detachment
|
||||||
callbacks = {'onbeforedetach': self._check_blocking}
|
callbacks = {'onbeforedetach': self._check_blocking}
|
||||||
if isinstance(self.partition_map, NoPartitions):
|
if isinstance(self.partition_map, NoPartitions):
|
||||||
|
# When the volume has no partitions, the virtual root partition path is equal to that of the volume
|
||||||
|
# Update that path whenever the path to the volume changes
|
||||||
def set_dev_path(e):
|
def set_dev_path(e):
|
||||||
self.partition_map.root.device_path = self.device_path
|
self.partition_map.root.device_path = self.device_path
|
||||||
callbacks['onafterattach'] = set_dev_path
|
callbacks['onafterattach'] = set_dev_path
|
||||||
callbacks['onlink_dm_node'] = set_dev_path
|
        callbacks['onlink_dm_node'] = set_dev_path
        callbacks['onunlink_dm_node'] = set_dev_path

+       # Create the configuration for our finite state machine
        cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': callbacks}
        super(Volume, self).__init__(cfg)

    def _after_create(self, e):
+       """
+       Args:
+           e (_e_obj): Event object containing arguments to create()
+       """
        if isinstance(self.partition_map, NoPartitions):
+           # When the volume has no partitions, the virtual root partition
+           # is essentially created when the volume is created, forward that creation event.
            self.partition_map.root.create()

    def _check_blocking(self, e):
+       """Checks whether the volume is blocked
+
+       Args:
+           e (_e_obj): Event object containing arguments to create()
+
+       Raises:
+           VolumeError
+       """
+       # Only the partition map can block the volume
        if self.partition_map.is_blocking():
            raise VolumeError('The partitionmap prevents the detach procedure')

    def _before_link_dm_node(self, e):
+       """Links the volume using the device mapper
+       This allows us to create a 'window' into the volume that acts like a volume in itself.
+       Mainly it is used to fool grub into thinking that it is working with a real volume,
+       rather than a loopback device or a network block device.
+
+       Args:
+           e (_e_obj): Event object containing arguments to create()
+           Arguments are:
+           logical_start_sector (int): The sector the volume should start at in the new volume
+           start_sector (int): The offset at which the volume should begin to be mapped in the new volume
+           sectors (int): The number of sectors that should be mapped
+           Read more at: http://manpages.debian.org/cgi-bin/man.cgi?query=dmsetup&apropos=0&sektion=0&manpath=Debian+7.0+wheezy&format=html&locale=en
+
+       Raises:
+           VolumeError
+       """
        import os.path
        from common.fs import get_partitions
+       # Fetch information from /proc/partitions
        proc_partitions = get_partitions()
        device_name = os.path.basename(self.device_path)
        device_partition = proc_partitions[device_name]

@@ -55,8 +102,10 @@ class Volume(FSMProxy):
        # The offset at which the volume should begin to be mapped in the new volume
        start_sector = getattr(e, 'start_sector', 0)

+       # The number of sectors that should be mapped
        sectors = getattr(e, 'sectors', int(self.size / 512) - start_sector)

+       # This is the table we send to dmsetup, so that it may create a device mapping for us.
        table = ('{log_start_sec} {sectors} linear {major}:{minor} {start_sec}'
                 .format(log_start_sec=logical_start_sector,
                         sectors=sectors,

@@ -65,6 +114,7 @@ class Volume(FSMProxy):
                         start_sec=start_sector))
        import string
        import os.path
+       # Figure out the device letter and path
        for letter in string.ascii_lowercase:
            dev_name = 'vd' + letter
            dev_path = os.path.join('/dev/mapper', dev_name)

@@ -76,12 +126,21 @@ class Volume(FSMProxy):
        if not hasattr(self, 'dm_node_name'):
            raise VolumeError('Unable to find a free block device path for mounting the bootstrap volume')

-       log_check_call(['/sbin/dmsetup', 'create', self.dm_node_name], table)
+       # Create the device mapping
+       log_check_call(['dmsetup', 'create', self.dm_node_name], table)
+       # Update the device_path but remember the old one for when we unlink the volume again
        self.unlinked_device_path = self.device_path
        self.device_path = self.dm_node_path

    def _before_unlink_dm_node(self, e):
-       log_check_call(['/sbin/dmsetup', 'remove', self.dm_node_name])
+       """Unlinks the device mapping
+
+       Args:
+           e (_e_obj): Event object containing arguments to create()
+       """
+       log_check_call(['dmsetup', 'remove', self.dm_node_name])
+       # Delete the no longer valid information
        del self.dm_node_name
        del self.dm_node_path
+       # Reset the device_path
        self.device_path = self.unlinked_device_path
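
The string handed to dmsetup above is a one-line `linear' target table. As a rough sketch with made-up numbers (a 10 MiB volume of 20480 sectors, backed by a device with major:minor 7:0 and no offsets), the code above would produce:

    logical_start_sector = 0
    start_sector = 0
    sectors = 20480  # 10 MiB / 512 bytes per sector
    table = ('{log_start_sec} {sectors} linear {major}:{minor} {start_sec}'
             .format(log_start_sec=logical_start_sector, sectors=sectors,
                     major=7, minor=0, start_sec=start_sector))
    # table == '0 20480 linear 7:0 0', which is then piped to `dmsetup create <dm_node_name>'
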
base/log.py

@@ -1,7 +1,20 @@
+"""This module holds functions and classes responsible for formatting the log output
+both to a file and to the console.
+
+.. module:: log
+"""
 import logging


 def get_logfile_path(manifest_path):
+    """Returns the path to a logfile given a manifest
+    The logfile name is constructed from the current timestamp and the basename of the manifest
+
+    Args:
+        manifest_path (str): The path to the manifest
+
+    Returns:
+        str. The path to the logfile
+    """
     import os.path
     from datetime import datetime

@@ -13,17 +26,31 @@ def get_logfile_path(manifest_path):


 def setup_logger(logfile=None, debug=False):
+    """Sets up the python logger to log to both a file and the console
+
+    Args:
+        logfile (str): Path to a logfile
+        debug (bool): Whether to log debug output to the console
+    """
     root = logging.getLogger()
+    # Make sure all logging statements are processed by our handlers, they decide the log level
     root.setLevel(logging.NOTSET)

+    # Create a file log handler
     file_handler = logging.FileHandler(logfile)
+    # Absolute timestamps are rather useless when bootstrapping, it's much more interesting
+    # to see how long things take, so we log in a relative format instead
     file_handler.setFormatter(FileFormatter('[%(relativeCreated)s] %(levelname)s: %(message)s'))
+    # The file log handler always logs everything
     file_handler.setLevel(logging.DEBUG)
     root.addHandler(file_handler)

+    # Create a console log handler
     import sys
     console_handler = logging.StreamHandler(sys.stderr)
+    # We want to colorize the output to the console, so we add a formatter
     console_handler.setFormatter(ConsoleFormatter())
+    # Set the log level depending on the debug argument
     if debug:
         console_handler.setLevel(logging.DEBUG)
     else:

@@ -32,6 +59,8 @@ def setup_logger(logfile=None, debug=False):


 class ConsoleFormatter(logging.Formatter):
+    """Formats log statements for the console
+    """
     level_colors = {logging.ERROR: 'red',
                     logging.WARNING: 'magenta',
                     logging.INFO: 'blue',

@@ -39,11 +68,15 @@ class ConsoleFormatter(logging.Formatter):

     def format(self, record):
         if(record.levelno in self.level_colors):
+            # Colorize the message if we have a color for it (DEBUG has no color)
             from termcolor import colored
             record.msg = colored(record.msg, self.level_colors[record.levelno])
         return super(ConsoleFormatter, self).format(record)


 class FileFormatter(logging.Formatter):
+    """Formats log statements for output to file
+    Currently this is just a stub
+    """
     def format(self, record):
         return super(FileFormatter, self).format(record)
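
Taken together, wiring up the logging comes down to two calls; a minimal usage sketch (the manifest path is hypothetical):

    import log
    logfile = log.get_logfile_path('manifest.json')
    log.setup_logger(logfile=logfile, debug=True)

    import logging
    logging.getLogger(__name__).info('Goes to both the console and the logfile')
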
base/main.py

@@ -1,16 +1,34 @@
+"""Main module containing all the setup necessary for running the bootstrapping process
+.. module:: main
+"""

 import logging
 log = logging.getLogger(__name__)


 def main():
-    import log
+    """Main function for invoking the bootstrap process
+
+    Raises:
+        Exception
+    """
+    # Get the commandline arguments
+    import os
     args = get_args()
+    # Require root privileges, except when doing a dry-run where they aren't needed
+    if os.geteuid() != 0 and not args.dry_run:
+        raise Exception('This program requires root privileges.')
+    # Setup logging
+    import log
     logfile = log.get_logfile_path(args.manifest)
     log.setup_logger(logfile=logfile, debug=args.debug)
+    # Everything has been set up, begin the bootstrapping process
     run(args)


 def get_args():
+    """Creates an argument parser and returns the arguments it has parsed
+    """
     from argparse import ArgumentParser
     parser = ArgumentParser(description='Bootstrap Debian for the cloud.')
     parser.add_argument('--debug', action='store_true',

@@ -24,31 +42,57 @@ def get_args():


 def run(args):
+    """Runs the bootstrapping process
+
+    Args:
+        args (dict): Dictionary of arguments from the commandline
+    """
+    # Load the manifest
     from manifest import Manifest
     manifest = Manifest(args.manifest)

+    # Get the tasklist
     from tasklist import TaskList
     tasklist = TaskList()
+    # 'resolve_tasks' is the name of the function to call on the provider and plugins
     tasklist.load('resolve_tasks', manifest)

+    # Create the bootstrap information object that'll be used throughout the bootstrapping process
     from bootstrapinfo import BootstrapInformation
     bootstrap_info = BootstrapInformation(manifest=manifest, debug=args.debug)

     try:
+        # Run all the tasks the tasklist has gathered
         tasklist.run(info=bootstrap_info, dry_run=args.dry_run)
+        # We're done! :-)
         log.info('Successfully completed bootstrapping')
     except (Exception, KeyboardInterrupt) as e:
+        # When an error occurs, log it and begin rollback
         log.exception(e)
         if args.pause_on_error:
-            raw_input("Press Enter to commence rollback")
+            # The --pause-on-error is useful when the user wants to inspect the volume before rollback
+            raw_input('Press Enter to commence rollback')
         log.error('Rolling back')

+        # Create a new tasklist to gather the necessary tasks for rollback
         rollback_tasklist = TaskList()

+        # Create a useful little function for the provider and plugins to use,
+        # when figuring out what tasks should be added to the rollback list.
        def counter_task(task, counter):
+            """counter_task() adds the second argument to the rollback tasklist
+            if the first argument is present in the list of completed tasks
+
+            Args:
+                task (Task): The task to look for in the completed tasks list
+                counter (Task): The task to add to the rollback tasklist
+            """
            if task in tasklist.tasks_completed and counter not in tasklist.tasks_completed:
                rollback_tasklist.tasks.add(counter)
+        # Ask the provider and plugins for tasks they'd like to add to the rollback tasklist
+        # Any additional arguments beyond the first two are passed directly to the provider and plugins
        rollback_tasklist.load('resolve_rollback_tasks', manifest, counter_task)

+        # Run the rollback tasklist
        rollback_tasklist.run(info=bootstrap_info, dry_run=args.dry_run)
        log.info('Successfully completed rollback')
base/manifest-schema.json

@@ -99,7 +99,7 @@
             "additionalProperties": false
         }
     },
-    "required": ["provider", "bootstrapper", "image", "volume", "system"],
+    "required": ["provider", "bootstrapper", "system", "volume"],
     "definitions": {
         "path": {
             "type": "string",

@@ -141,10 +141,14 @@
             "type": "object",
             "properties": {
                 "size": { "$ref": "#/definitions/bytes" },
-                "filesystem": { "enum": ["ext2", "ext3", "ext4", "xfs"] }
+                "filesystem": { "enum": ["ext2", "ext3", "ext4", "xfs"] },
+                "format_command": {
+                    "type": "array",
+                    "items": {"type": "string"},
+                    "minItems": 1
+                }
             },
             "required": ["size", "filesystem"]
         }
-    },
-    "required": ["provider", "bootstrapper", "system", "packages", "volume"]
+    }
 }
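
For reference, a partition entry that validates against the definition above might look like the following sketch; the size string and the command itself are illustrative, and how the command is eventually invoked is not decided by the schema:

    {
        "size": "1GiB",
        "filesystem": "ext4",
        "format_command": ["mkfs.ext4", "-L", "debian"]
    }
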
base/manifest.py

@@ -1,21 +1,48 @@
+"""The Manifest module contains the manifest that providers and plugins use
+to determine which tasks should be added to the tasklist, what arguments various
+invocations should have etc.
+.. module:: manifest
+"""
+from common.tools import load_json
 import logging
 log = logging.getLogger(__name__)


 class Manifest(object):
+    """This class holds all the information that providers and plugins need
+    to perform the bootstrapping process. All actions that are taken originate from
+    here. The manifest shall not be modified after it has been loaded.
+    Currently, immutability is not enforced and it would require a fair amount of code
+    to enforce it, instead we just rely on tasks behaving properly.
+    """
     def __init__(self, path):
+        """Initializer: Given a path we load, validate and parse the manifest.
+
+        Args:
+            path (str): The path to the manifest
+        """
         self.path = path
         self.load()
         self.validate()
         self.parse()

     def load(self):
-        self.data = self.load_json(self.path)
+        """Loads the manifest.
+        This function not only reads the manifest but also loads the specified provider and plugins.
+        Once they are loaded, the initialize() function is called on each of them (if it exists).
+        The provider must have an initialize function.
+        """
+        # Load the manifest JSON using the loader in common.tools
+        # It strips comments (which are invalid in strict json) before loading the data.
+        self.data = load_json(self.path)
+        # Get the provider name from the manifest and load the corresponding module
         provider_modname = 'providers.{provider}'.format(provider=self.data['provider'])
         log.debug('Loading provider `{modname}\''.format(modname=provider_modname))
+        # Create a modules dict that contains the loaded provider and plugins
         self.modules = {'provider': __import__(provider_modname, fromlist=['providers']),
                         'plugins': [],
                         }
+        # Run through all the plugins mentioned in the manifest and load them
         if 'plugins' in self.data:
             for plugin_name, plugin_data in self.data['plugins'].iteritems():
                 modname = 'plugins.{plugin}'.format(plugin=plugin_name)

@@ -23,44 +50,76 @@ class Manifest(object):
                 plugin = __import__(modname, fromlist=['plugins'])
                 self.modules['plugins'].append(plugin)

+        # Run the initialize function on the provider and plugins
         self.modules['provider'].initialize()
         for module in self.modules['plugins']:
+            # Plugins are not required to have an initialize function
             init = getattr(module, 'initialize', None)
             if callable(init):
                 init()

     def validate(self):
+        """Validates the manifest using the base, provider and plugin validation functions.
+        Plugins are not required to have a validate_manifest function
+        """
         from . import validate_manifest
+        # Validate the manifest with the base validation function in __init__
         validate_manifest(self.data, self.schema_validator, self.validation_error)
+        # Run the provider validation
         self.modules['provider'].validate_manifest(self.data, self.schema_validator, self.validation_error)
+        # Run the validation function for any plugin that has it
         for plugin in self.modules['plugins']:
             validate = getattr(plugin, 'validate_manifest', None)
             if callable(validate):
                 validate(self.data, self.schema_validator, self.validation_error)

     def parse(self):
+        """Parses the manifest.
+        Well... "parsing" is a big word.
+        The function really just sets up some convenient attributes so that tasks
+        don't have to access information with info.manifest.data['section']
+        but can do it with info.manifest.section.
+        """
         self.provider = self.data['provider']
         self.bootstrapper = self.data['bootstrapper']
         self.image = self.data['image']
         self.volume = self.data['volume']
         self.system = self.data['system']
-        self.packages = self.data['packages']
+        # The packages and plugins section is not required
+        self.packages = self.data['packages'] if 'packages' in self.data else {}
         self.plugins = self.data['plugins'] if 'plugins' in self.data else {}

     def load_json(self, path):
+        """Loads JSON. Unused and will be removed.
+        Use common.tools.load_json instead
+        """
         import json
         from minify_json import json_minify
         with open(path) as stream:
             return json.loads(json_minify(stream.read(), False))

     def schema_validator(self, data, schema_path):
+        """This convenience function is passed around to all the validation functions
+        so that they may run a json-schema validation by giving it the data and a path to the schema.
+
+        Args:
+            data (dict): Data to validate (normally the manifest data)
+            schema_path (str): Path to the json-schema to use for validation
+        """
         import jsonschema
-        schema = self.load_json(schema_path)
+        schema = load_json(schema_path)
         try:
             jsonschema.validate(data, schema)
         except jsonschema.ValidationError as e:
             self.validation_error(e.message, e.path)

     def validation_error(self, message, json_path=None):
+        """This function is passed to all validation functions so that they may
+        raise a validation error because a custom validation of the manifest failed.
+
+        Args:
+            message (str): Message to user about the error
+            json_path (list): A path to the location in the manifest where the error occurred
+        """
         from common.exceptions import ManifestError
         raise ManifestError(message, self.path, json_path)
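
After parse() has run, the manifest sections are plain attributes; a short usage sketch (the path is hypothetical):

    manifest = Manifest('manifest.json')  # loads, validates and parses in one go
    release = manifest.system['release']  # same data as manifest.data['system']['release']
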
base/phase.py

@@ -1,16 +1,33 @@


 class Phase(object):
+    """The Phase class represents a phase a task may be in.
+    It has no function other than to act as an anchor in the task graph.
+    All phases are instantiated in common.phases
+    """
     def __init__(self, name, description):
+        # The name of the phase
         self.name = name
+        # The description of the phase (currently not used anywhere)
         self.description = description

     def pos(self):
+        """Gets the position of the phase
+        Returns:
+            int. The positional index of the phase in relation to the other phases
+        """
         from common.phases import order
         return next(i for i, phase in enumerate(order) if phase is self)

     def __cmp__(self, other):
+        """Compares the phase order in relation to the other phases
+        """
         return self.pos() - other.pos()

     def __str__(self):
+        """String representation of the phase, the name suffices
+
+        Returns:
+            string.
+        """
         return self.name
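
Since __cmp__ defers to pos(), phases compare according to their position in common.phases.order; a small sketch of what that buys us:

    from common.phases import order
    # Earlier phases compare as smaller, so sorting phases reproduces the bootstrap order
    assert order[0] < order[1]
    assert sorted(order) == order
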
base/pkg/exceptions.py

@@ -1,8 +1,12 @@


 class PackageError(Exception):
+    """Raised when an error occurs while handling the packages list
+    """
     pass


 class SourceError(Exception):
+    """Raised when an error occurs while handling the sources list
+    """
     pass
base/pkg/packagelist.py

@@ -2,47 +2,96 @@ from exceptions import PackageError


 class PackageList(object):
+    """Represents a list of packages
+    """

     class Remote(object):
+        """A remote package with an optional target
+        """
         def __init__(self, name, target):
+            """
+            Args:
+                name (str): The name of the package
+                target (str): The name of the target release
+            """
             self.name = name
             self.target = target

         def __str__(self):
+            """Converts the package into something that apt-get install can parse
+            Returns:
+                string.
+            """
             if self.target is None:
                 return self.name
             else:
                 return '{name}/{target}'.format(name=self.name, target=self.target)

     class Local(object):
+        """A local package
+        """
         def __init__(self, path):
+            """
+            Args:
+                path (str): The path to the local package
+            """
             self.path = path

         def __str__(self):
+            """
+            Returns:
+                string. The path to the local package
+            """
             return self.path

     def __init__(self, manifest_vars, source_lists):
+        """
+        Args:
+            manifest_vars (dict): The manifest variables
+            source_lists (SourceLists): The sourcelists for apt
+        """
         self.manifest_vars = manifest_vars
         self.source_lists = source_lists
+        # The default_target is the release we are bootstrapping
         self.default_target = '{system.release}'.format(**self.manifest_vars)
+        # The list of packages that should be installed, this is not a set.
+        # We want to preserve the order in which the packages were added so that local
+        # packages may be installed in the correct order.
         self.install = []
+        # A function that filters the install list and only returns remote packages
         self.remote = lambda: filter(lambda x: isinstance(x, self.Remote), self.install)

     def add(self, name, target=None):
+        """Adds a package to the install list
+
+        Args:
+            name (str): The name of the package to install, may contain manifest vars references
+            target (str): The name of the target release for the package, may contain manifest vars references
+
+        Raises:
+            PackageError
+        """
         name = name.format(**self.manifest_vars)
         if target is not None:
             target = target.format(**self.manifest_vars)
+        # Check if the package has already been added.
+        # If so, make sure it's the same target and raise a PackageError otherwise
         package = next((pkg for pkg in self.remote() if pkg.name == name), None)
         if package is not None:
-            same_target = package.target != target
+            # It's the same target if the target names match or one of the targets is None
+            # and the other is the default target.
+            same_target = package.target == target
             same_target = same_target or package.target is None and target == self.default_target
             same_target = same_target or package.target == self.default_target and target is None
             if not same_target:
                 msg = ('The package {name} was already added to the package list, '
-                       'but with another target release ({target})').format(name=name, target=package.target)
+                       'but with target release `{target}\' instead of `{add_target}\''
+                       .format(name=name, target=package.target, add_target=target))
                 raise PackageError(msg)
+            # The package has already been added, skip the checks below
             return

+        # Check if the target exists in the sources list, raise a PackageError if not
         check_target = target
         if check_target is None:
             check_target = self.default_target

@@ -50,8 +99,17 @@ class PackageList(object):
             msg = ('The target release {target} was not found in the sources list').format(target=check_target)
             raise PackageError(msg)

+        # Note that we maintain the target value even if it is None.
+        # This allows us to preserve the semantics of the default target when calling apt-get install
+        # Why? Try installing nfs-client/wheezy, you can't. It's a virtual package for which you cannot define
+        # a target release. Only `apt-get install nfs-client` works.
         self.install.append(self.Remote(name, target))

     def add_local(self, package_path):
+        """Adds a local package to the installation list
+
+        Args:
+            package_path (str): Path to the local package, may contain manifest vars references
+        """
         package_path = package_path.format(**self.manifest_vars)
         self.install.append(self.Local(package_path))
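
The target handling above boils down to: a package may be added twice only if both additions refer to the same release. A behavior sketch, assuming `wheezy' is the default target and `wheezy-backports' is present in the sources list:

    packages = PackageList(manifest_vars, source_lists)
    packages.add('openssh-server')              # `apt-get install openssh-server'
    packages.add('openssh-server', 'wheezy')    # same as the default target, silently skipped
    packages.add('puppet', 'wheezy-backports')  # `apt-get install puppet/wheezy-backports'
    packages.add('puppet', 'wheezy')            # raises PackageError: conflicting targets
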
base/pkg/sourceslist.py

@@ -1,12 +1,27 @@


 class SourceLists(object):
+    """Represents a list of sources lists for apt
+    """

     def __init__(self, manifest_vars):
+        """
+        Args:
+            manifest_vars (dict): The manifest variables
+        """
+        # A dictionary with the name of the file in sources.list.d as the key
+        # The values are lists of Source objects
         self.sources = {}
+        # Save the manifest variables, we need them later on
         self.manifest_vars = manifest_vars

     def add(self, name, line):
+        """Adds a source to the apt sources list
+
+        Args:
+            name (str): Name of the file in sources.list.d, may contain manifest vars references
+            line (str): The line for the source file, may contain manifest vars references
+        """
         name = name.format(**self.manifest_vars)
         line = line.format(**self.manifest_vars)
         if name not in self.sources:

@@ -14,7 +29,16 @@ class SourceLists(object):
         self.sources[name].append(Source(line))

     def target_exists(self, target):
+        """Checks whether the target exists in the sources list
+
+        Args:
+            target (str): Name of the target to check for, may contain manifest vars references
+
+        Returns:
+            bool. Whether the target exists
+        """
         target = target.format(**self.manifest_vars)
+        # Run through all the sources and return True if the target exists
         for lines in self.sources.itervalues():
             if target in (source.distribution for source in lines):
                 return True

@@ -22,8 +46,20 @@ class SourceLists(object):


 class Source(object):
+    """Represents a single source line
+    """

     def __init__(self, line):
+        """
+        Args:
+            line (str): An apt source line
+
+        Raises:
+            SourceError
+        """
+        # Parse the source line and populate the class attributes with it
+        # The format is taken from `man sources.list`
+        # or: http://manpages.debian.org/cgi-bin/man.cgi?sektion=5&query=sources.list&apropos=0&manpath=sid&locale=en
         import re
         regexp = re.compile('^(?P<type>deb|deb-src)\s+'
                             '(\[\s*(?P<options>.+\S)?\s*\]\s+)?'

@@ -45,6 +81,12 @@ class Source(object):
         self.components = re.sub(' +', ' ', match['components']).split(' ')

     def __str__(self):
+        """Convert the object into a source line
+        This is pretty much the reverse of what we're doing in the initialization function.
+
+        Returns:
+            string.
+        """
         options = ''
         if len(self.options) > 0:
             options = ' [{options}]'.format(options=' '.join(self.options))
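
As a concrete example, a standard wheezy line run through the regular expression above decomposes roughly as follows (the pattern is truncated in the hunk, so apart from distribution and components, which are used elsewhere in the class, the attribute names are assumptions):

    source = Source('deb http://http.debian.net/debian wheezy main contrib')
    source.distribution  # 'wheezy', which is what target_exists() matches against
    source.components    # ['main', 'contrib']
    str(source)          # reassembles the original line
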
base/release-codenames.json (new file)

@@ -0,0 +1,22 @@
+{ // This is a mapping of Debian release names to their respective codenames
+    "unstable": "sid",
+    "testing": "jessie",
+    "stable": "wheezy",
+    "oldstable": "squeeze",
+
+    "jessie": "jessie",
+    "wheezy": "wheezy",
+    "squeeze": "squeeze",
+
+    // The following release names are not supported, but are included for completeness' sake
+    "lenny": "lenny",
+    "etch": "etch",
+    "sarge": "sarge",
+    "woody": "woody",
+    "potato": "potato",
+    "slink": "slink",
+    "hamm": "hamm",
+    "bo": "bo",
+    "rex": "rex",
+    "buzz": "buzz"
+}
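
Since the JSON loader in common.tools strips comments before parsing, this file can be read like any other manifest file; a lookup sketch (the relative path is an assumption):

    from common.tools import load_json
    codenames = load_json('base/release-codenames.json')
    codenames['stable']  # -> 'wheezy'
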
base/task.py

@@ -1,17 +1,37 @@


 class Task(object):
+    """The task class represents a task that can be run.
+    It is merely a wrapper for the run function and should never be instantiated.
+    """
+    # The phase this task is located in.
     phase = None
+    # List of tasks that should run before this task is run
     predecessors = []
+    # List of tasks that should run after this task has run
     successors = []

     class __metaclass__(type):
+        """Metaclass to control how the class is coerced into a string
+        """
         def __repr__(cls):
+            """
+            Returns:
+                string.
+            """
             return '{module}.{task}'.format(module=cls.__module__, task=cls.__name__)

         def __str__(cls):
+            """
+            Returns:
+                string.
+            """
             return repr(cls)

     @classmethod
     def run(cls, info):
+        """The run function, all work is done inside this function
+        Args:
+            info (BootstrapInformation): The bootstrap info object
+        """
         pass
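
A concrete task is thus just a class-level declaration: pick a phase, optionally order the class against other tasks, and do the work in run(). A minimal, purely illustrative sketch:

    from base import Task
    from common import phases

    class SayHello(Task):  # hypothetical task, not part of the codebase
        description = 'Saying hello'
        phase = phases.preparation

        @classmethod
        def run(cls, info):
            import logging
            logging.getLogger(__name__).info('Hello from {task}'.format(task=cls))
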
base/tasklist.py

@@ -1,49 +1,95 @@
+"""The tasklist module contains the TaskList class.
+.. module:: tasklist
+"""

 from common.exceptions import TaskListError
 import logging
 log = logging.getLogger(__name__)


 class TaskList(object):
+    """The tasklist class aggregates all tasks that should be run
+    and orders them according to their dependencies.
+    """

     def __init__(self):
         self.tasks = set()
         self.tasks_completed = []

     def load(self, function, manifest, *args):
+        """Calls 'function' on the provider and all plugins that have been loaded by the manifest.
+        Any additional arguments are passed directly to 'function'.
+        The function that is called shall accept the taskset as its first argument and the manifest
+        as its second argument.
+
+        Args:
+            function (str): Name of the function to call
+            manifest (Manifest): The manifest
+            *args: Additional arguments that should be passed to the function that is called
+        """
+        # Call 'function' on the provider
         getattr(manifest.modules['provider'], function)(self.tasks, manifest, *args)
         for plugin in manifest.modules['plugins']:
+            # Plugins are not required to have whatever function we call
             fn = getattr(plugin, function, None)
             if callable(fn):
                 fn(self.tasks, manifest, *args)

     def run(self, info={}, dry_run=False):
+        """Converts the taskgraph into a list and runs all tasks in that list
+
+        Args:
+            info (dict): The bootstrap information object
+            dry_run (bool): Whether to actually run the tasks or simply step through them
+        """
+        # Create a list for us to run
         task_list = self.create_list()
+        # Output the tasklist
         log.debug('Tasklist:\n\t{list}'.format(list='\n\t'.join(map(repr, task_list))))

         for task in task_list:
+            # Tasks are not required to have a description
             if hasattr(task, 'description'):
                 log.info(task.description)
             else:
+                # If there is no description, simply coerce the task into a string and print its name
                 log.info('Running {task}'.format(task=task))
             if not dry_run:
+                # Run the task
                 task.run(info)
+                # Remember which tasks have been run for later use (e.g. when rolling back, because of an error)
                 self.tasks_completed.append(task)

     def create_list(self):
+        """Creates a list of all the tasks that should be run.
+        """
         from common.phases import order
+        # Get a hold of all tasks
+        tasks = self.get_all_tasks()
+        # Make sure the taskset is a subset of all the tasks we have gathered
+        self.tasks.issubset(tasks)
+        # Create a graph over all tasks by creating a map of each task's successors
         graph = {}
-        for task in self.tasks:
+        for task in tasks:
+            # Do a sanity check first
             self.check_ordering(task)
             successors = set()
+            # Add all successors mentioned in the task
             successors.update(task.successors)
-            successors.update(filter(lambda succ: task in succ.predecessors, self.tasks))
+            # Add all tasks that mention this task as a predecessor
+            successors.update(filter(lambda succ: task in succ.predecessors, tasks))
+            # Create a list of phases that succeed the phase of this task
             succeeding_phases = order[order.index(task.phase) + 1:]
-            successors.update(filter(lambda succ: succ.phase in succeeding_phases, self.tasks))
-            graph[task] = filter(lambda succ: succ in self.tasks, successors)
+            # Add all tasks that occur in above mentioned succeeding phases
+            successors.update(filter(lambda succ: succ.phase in succeeding_phases, tasks))
+            # Map the successors to the task
+            graph[task] = successors

+        # Use the strongly connected components algorithm to check for cycles in our task graph
         components = self.strongly_connected_components(graph)
         cycles_found = 0
         for component in components:
+            # A component of a single node is also a strongly connected component but hardly a cycle, so we filter them out
             if len(component) > 1:
                 cycles_found += 1
                 log.debug('Cycle: {list}\n'.format(list=', '.join(map(repr, component))))

@@ -52,18 +98,79 @@ class TaskList(object):
                    'consult the logfile for more information.'.format(cycles_found))
             raise TaskListError(msg)

+        # Run a topological sort on the graph, returning an ordered list
         sorted_tasks = self.topological_sort(graph)

+        # Filter out any tasks not in the tasklist
+        # We want to maintain ordering, so we don't use set intersection
+        sorted_tasks = filter(lambda task: task in self.tasks, sorted_tasks)
         return sorted_tasks

+    def get_all_tasks(self):
+        """Gets a list of all task classes in the package
+
+        Returns:
+            list. A list of all tasks in the package
+        """
+        # Get a generator that returns all classes in the package
+        classes = self.get_all_classes('..')
+
+        # Function to check whether a class is a task (excluding the superclass Task)
+        def is_task(obj):
+            from task import Task
+            return issubclass(obj, Task) and obj is not Task
+        return filter(is_task, classes)  # Only return classes that are tasks
+
+    def get_all_classes(self, path=None):
+        """Given a path to a package, this function retrieves all the classes in it
+
+        Args:
+            path (str): Path to the package
+
+        Returns:
+            generator. A generator that yields classes
+
+        Raises:
+            Exception
+        """
+        import pkgutil
+        import importlib
+        import inspect
+
+        def walk_error(module):
+            raise Exception('Unable to inspect module `{module}\''.format(module=module))
+        walker = pkgutil.walk_packages(path, '', walk_error)
+        for _, module_name, _ in walker:
+            module = importlib.import_module(module_name)
+            classes = inspect.getmembers(module, inspect.isclass)
+            for class_name, obj in classes:
+                # We only want classes that are defined in the module, and not imported ones
+                if obj.__module__ == module_name:
+                    yield obj

     def check_ordering(self, task):
+        """Checks the ordering of a task in relation to other tasks and their phases
+        This function checks for a subset of what the strongly connected components algorithm does,
+        but can deliver a more precise error message, namely that there is a conflict between
+        what a task has specified as its predecessors or successors and in which phase it is placed.
+
+        Args:
+            task (Task): The task to check the ordering for
+
+        Raises:
+            TaskListError
+        """
         for successor in task.successors:
+            # Run through all successors and check whether the phase of the task
+            # comes before the phase of a successor
             if task.phase > successor.phase:
                 msg = ("The task {task} is specified as running before {other}, "
                        "but its phase '{phase}' lies after the phase '{other_phase}'"
                        .format(task=task, other=successor, phase=task.phase, other_phase=successor.phase))
                 raise TaskListError(msg)
         for predecessor in task.predecessors:
+            # Run through all predecessors and check whether the phase of the task
+            # comes after the phase of a predecessor
             if task.phase < predecessor.phase:
                 msg = ("The task {task} is specified as running after {other}, "
                        "but its phase '{phase}' lies before the phase '{other_phase}'"

@@ -71,9 +178,15 @@ class TaskList(object):
                 raise TaskListError(msg)

     def strongly_connected_components(self, graph):
-        # Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
-        # Find the strongly connected components in a graph using Tarjan's algorithm.
-        # graph should be a dictionary mapping node names to lists of successor nodes.
+        """Find the strongly connected components in a graph using Tarjan's algorithm.
+        Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
+
+        Args:
+            graph (dict): mapping of tasks to lists of successor tasks
+
+        Returns:
+            list. List of tuples that are strongly connected components
+        """

         result = []
         stack = []

@@ -105,7 +218,15 @@ class TaskList(object):
         return result

     def topological_sort(self, graph):
-        # Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
+        """Runs a topological sort on a graph
+        Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
+
+        Args:
+            graph (dict): mapping of tasks to lists of successor tasks
+
+        Returns:
+            list. A list of all tasks in the graph sorted according to their dependencies
+        """
         count = {}
         for node in graph:
             count[node] = 0
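
Both graph helpers operate on a plain dictionary mapping nodes to their successors, so they can be exercised without any Task machinery; a toy sketch:

    tl = TaskList()
    graph = {'a': ['b'], 'b': ['c'], 'c': []}
    tl.topological_sort(graph)               # -> ['a', 'b', 'c']
    tl.strongly_connected_components(graph)  # only singleton components, i.e. no cycles
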
common/bytes.py

@@ -4,9 +4,9 @@ class Bytes(object):

     units = {'B': 1,
              'KiB': 1024,
-             'MiB': 1024*1024,
-             'GiB': 1024*1024*1024,
-             'TiB': 1024*1024*1024*1024,
+             'MiB': 1024 * 1024,
+             'GiB': 1024 * 1024 * 1024,
+             'TiB': 1024 * 1024 * 1024 * 1024,
              }

     def __init__(self, qty):
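
This units map backs conversions such as self.size.get_qty_in('MiB') in the volume code below; assuming the constructor accepts quantity strings the way manifests specify sizes, a conversion sketch:

    size = Bytes('1GiB')    # assumed input format
    size.get_qty_in('MiB')  # -> 1024
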
@ -12,14 +12,14 @@ class LoopbackVolume(Volume):
|
||||||
def _before_create(self, e):
|
def _before_create(self, e):
|
||||||
self.image_path = e.image_path
|
self.image_path = e.image_path
|
||||||
vol_size = str(self.size.get_qty_in('MiB')) + 'M'
|
vol_size = str(self.size.get_qty_in('MiB')) + 'M'
|
||||||
log_check_call(['/usr/bin/qemu-img', 'create', '-f', 'raw', self.image_path, vol_size])
|
log_check_call(['qemu-img', 'create', '-f', 'raw', self.image_path, vol_size])
|
||||||
|
|
||||||
def _before_attach(self, e):
|
def _before_attach(self, e):
|
||||||
[self.loop_device_path] = log_check_call(['/sbin/losetup', '--show', '--find', self.image_path])
|
[self.loop_device_path] = log_check_call(['losetup', '--show', '--find', self.image_path])
|
||||||
self.device_path = self.loop_device_path
|
self.device_path = self.loop_device_path
|
||||||
|
|
||||||
def _before_detach(self, e):
|
def _before_detach(self, e):
|
||||||
log_check_call(['/sbin/losetup', '--detach', self.loop_device_path])
|
log_check_call(['losetup', '--detach', self.loop_device_path])
|
||||||
del self.loop_device_path
|
del self.loop_device_path
|
||||||
del self.device_path
|
del self.device_path
|
||||||
|
|
||||||
|
|
common/fs/qemuvolume.py

@@ -9,7 +9,7 @@ class QEMUVolume(LoopbackVolume):
     def _before_create(self, e):
         self.image_path = e.image_path
         vol_size = str(self.size.get_qty_in('MiB')) + 'M'
-        log_check_call(['/usr/bin/qemu-img', 'create', '-f', self.qemu_format, self.image_path, vol_size])
+        log_check_call(['qemu-img', 'create', '-f', self.qemu_format, self.image_path, vol_size])

     def _check_nbd_module(self):
         from base.fs.partitionmaps.none import NoPartitions

@@ -40,11 +40,11 @@ class QEMUVolume(LoopbackVolume):
     def _before_attach(self, e):
         self._check_nbd_module()
         self.loop_device_path = self._find_free_nbd_device()
-        log_check_call(['/usr/bin/qemu-nbd', '--connect', self.loop_device_path, self.image_path])
+        log_check_call(['qemu-nbd', '--connect', self.loop_device_path, self.image_path])
         self.device_path = self.loop_device_path

     def _before_detach(self, e):
-        log_check_call(['/usr/bin/qemu-nbd', '--disconnect', self.loop_device_path])
+        log_check_call(['qemu-nbd', '--disconnect', self.loop_device_path])
         del self.loop_device_path
         del self.device_path
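
Both volume types follow the same attach/use/detach choreography, only the backing command differs (losetup for raw images, qemu-nbd for QEMU formats); stripped of the class machinery, the loopback case amounts to this sketch (the image path is hypothetical):

    from common.tools import log_check_call
    [device_path] = log_check_call(['losetup', '--show', '--find', 'volume.raw'])
    # ... partition, format and mount the volume via device_path ...
    log_check_call(['losetup', '--detach', device_path])
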
@ -12,19 +12,21 @@ from common.tasks import security
|
||||||
from common.tasks import locale
|
from common.tasks import locale
|
||||||
|
|
||||||
base_set = [workspace.CreateWorkspace,
|
base_set = [workspace.CreateWorkspace,
|
||||||
host.HostDependencies,
|
bootstrap.AddRequiredCommands,
|
||||||
host.CheckHostDependencies,
|
host.CheckExternalCommands,
|
||||||
bootstrap.Bootstrap,
|
bootstrap.Bootstrap,
|
||||||
workspace.DeleteWorkspace,
|
workspace.DeleteWorkspace,
|
||||||
]
|
]
|
||||||
|
|
||||||
volume_set = [volume.Attach,
|
volume_set = [volume.Attach,
|
||||||
volume.Detach,
|
volume.Detach,
|
||||||
|
filesystem.AddRequiredCommands,
|
||||||
filesystem.Format,
|
filesystem.Format,
|
||||||
filesystem.FStab,
|
filesystem.FStab,
|
||||||
]
|
]
|
||||||
|
|
||||||
partitioning_set = [partitioning.PartitionVolume,
|
partitioning_set = [partitioning.AddRequiredCommands,
|
||||||
|
partitioning.PartitionVolume,
|
||||||
partitioning.MapPartitions,
|
partitioning.MapPartitions,
|
||||||
partitioning.UnmapPartitions,
|
partitioning.UnmapPartitions,
|
||||||
]
|
]
|
||||||
|
|
|
@ -26,11 +26,11 @@ class AddDefaultSources(Task):
|
||||||
sections = 'main'
|
sections = 'main'
|
||||||
if 'sections' in info.manifest.system:
|
if 'sections' in info.manifest.system:
|
||||||
sections = ' '.join(info.manifest.system['sections'])
|
sections = ' '.join(info.manifest.system['sections'])
|
||||||
info.source_lists.add('main', 'deb {apt_mirror} {system.release} '+sections)
|
info.source_lists.add('main', 'deb {apt_mirror} {system.release} ' + sections)
|
||||||
info.source_lists.add('main', 'deb-src {apt_mirror} {system.release} '+sections)
|
info.source_lists.add('main', 'deb-src {apt_mirror} {system.release} ' + sections)
|
||||||
if info.manifest.system['release'] not in {'testing', 'unstable'}:
|
if info.manifest.system['release'] not in {'testing', 'unstable'}:
|
||||||
info.source_lists.add('main', 'deb {apt_mirror} {system.release}-updates '+sections)
|
info.source_lists.add('main', 'deb {apt_mirror} {system.release}-updates ' + sections)
|
||||||
info.source_lists.add('main', 'deb-src {apt_mirror} {system.release}-updates '+sections)
|
info.source_lists.add('main', 'deb-src {apt_mirror} {system.release}-updates ' + sections)
|
||||||
|
|
||||||
|
|
||||||
class InstallTrustedKeys(Task):
|
class InstallTrustedKeys(Task):
|
||||||
|
@ -87,8 +87,8 @@ class AptUpdate(Task):
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def run(cls, info):
|
def run(cls, info):
|
||||||
log_check_call(['/usr/sbin/chroot', info.root,
|
log_check_call(['chroot', info.root,
|
||||||
'/usr/bin/apt-get', 'update'])
|
'apt-get', 'update'])
|
||||||
|
|
||||||
|
|
||||||
class AptUpgrade(Task):
|
class AptUpgrade(Task):
|
||||||
|
@ -100,15 +100,15 @@ class AptUpgrade(Task):
|
||||||
def run(cls, info):
|
def run(cls, info):
|
||||||
from subprocess import CalledProcessError
|
from subprocess import CalledProcessError
|
||||||
try:
|
try:
|
||||||
log_check_call(['/usr/sbin/chroot', info.root,
|
log_check_call(['chroot', info.root,
|
||||||
'/usr/bin/apt-get', 'install',
|
'apt-get', 'install',
|
||||||
'--fix-broken',
|
'--fix-broken',
|
||||||
'--no-install-recommends',
|
'--no-install-recommends',
|
||||||
'--assume-yes'])
|
'--assume-yes'])
|
||||||
log_check_call(['/usr/sbin/chroot', info.root,
|
log_check_call(['chroot', info.root,
|
||||||
'/usr/bin/apt-get', 'upgrade',
|
'apt-get', 'upgrade',
|
||||||
'--no-install-recommends',
|
'--no-install-recommends',
|
||||||
'--assume-yes'])
|
'--assume-yes'])
|
||||||
except CalledProcessError as e:
|
except CalledProcessError as e:
|
||||||
if e.returncode == 100:
|
if e.returncode == 100:
|
||||||
import logging
|
import logging
|
||||||
|
@ -125,9 +125,9 @@ class PurgeUnusedPackages(Task):
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def run(cls, info):
|
def run(cls, info):
|
||||||
log_check_call(['/usr/sbin/chroot', info.root,
|
log_check_call(['chroot', info.root,
|
||||||
'/usr/bin/apt-get', 'autoremove',
|
'apt-get', 'autoremove',
|
||||||
'--purge'])
|
'--purge'])
|
||||||
|
|
||||||
|
|
||||||
class AptClean(Task):
|
class AptClean(Task):
|
||||||
|
@ -136,8 +136,8 @@ class AptClean(Task):
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def run(cls, info):
|
def run(cls, info):
|
||||||
log_check_call(['/usr/sbin/chroot', info.root,
|
log_check_call(['chroot', info.root,
|
||||||
'/usr/bin/apt-get', 'clean'])
|
'apt-get', 'clean'])
|
||||||
|
|
||||||
lists = os.path.join(info.root, 'var/lib/apt/lists')
|
lists = os.path.join(info.root, 'var/lib/apt/lists')
|
||||||
for list_file in [os.path.join(lists, f) for f in os.listdir(lists)]:
|
for list_file in [os.path.join(lists, f) for f in os.listdir(lists)]:
|
||||||
|
|
|
@ -91,9 +91,9 @@ class InstallGrub(Task):
|
||||||
idx=idx + 1))
|
idx=idx + 1))
|
||||||
|
|
||||||
# Install grub
|
# Install grub
|
||||||
log_check_call(['/usr/sbin/chroot', info.root,
|
log_check_call(['chroot', info.root,
|
||||||
'/usr/sbin/grub-install', device_path])
|
'grub-install', device_path])
|
||||||
log_check_call(['/usr/sbin/chroot', info.root, '/usr/sbin/update-grub'])
|
log_check_call(['chroot', info.root, 'update-grub'])
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
if isinstance(info.volume, LoopbackVolume):
|
if isinstance(info.volume, LoopbackVolume):
|
||||||
remount(info.volume, unlink_fn)
|
remount(info.volume, unlink_fn)
|
||||||
|
@ -127,12 +127,12 @@ class InstallExtLinux(Task):
|
||||||
bootloader = '/usr/lib/syslinux/gptmbr.bin'
|
bootloader = '/usr/lib/syslinux/gptmbr.bin'
|
||||||
else:
|
else:
|
||||||
bootloader = '/usr/lib/extlinux/mbr.bin'
|
bootloader = '/usr/lib/extlinux/mbr.bin'
|
||||||
log_check_call(['/usr/sbin/chroot', info.root,
|
log_check_call(['chroot', info.root,
|
||||||
'/bin/dd', 'bs=440', 'count=1',
|
'dd', 'bs=440', 'count=1',
|
||||||
'if=' + bootloader,
|
'if=' + bootloader,
|
||||||
'of=' + info.volume.device_path])
|
'of=' + info.volume.device_path])
|
||||||
log_check_call(['/usr/sbin/chroot', info.root,
|
log_check_call(['chroot', info.root,
|
||||||
'/usr/bin/extlinux',
|
'extlinux',
|
||||||
'--install', '/boot/extlinux'])
|
'--install', '/boot/extlinux'])
|
||||||
log_check_call(['/usr/sbin/chroot', info.root,
|
log_check_call(['chroot', info.root,
|
||||||
'/usr/sbin/extlinux-update'])
|
'extlinux-update'])
|
||||||
|
|
|
@@ -1,12 +1,23 @@
 from base import Task
 from common import phases
 from common.exceptions import TaskError
+import host
 import logging
 log = logging.getLogger(__name__)


+class AddRequiredCommands(Task):
+	description = 'Adding commands required for bootstrapping Debian'
+	phase = phases.preparation
+	successors = [host.CheckExternalCommands]
+
+	@classmethod
+	def run(cls, info):
+		info.host_dependencies['debootstrap'] = 'debootstrap'
+
+
 def get_bootstrap_args(info):
-	executable = ['/usr/sbin/debootstrap']
+	executable = ['debootstrap']
 	options = ['--arch=' + info.manifest.system['architecture']]
 	if len(info.include_packages) > 0:
 		options.append('--include=' + ','.join(info.include_packages))

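This is the new preparation-phase pattern that replaces the central HostDependencies task: each module registers the external commands it needs as a command-to-package mapping before `host.CheckExternalCommands` runs. A standalone sketch of the idea, with `Info` as a stand-in for the bootstrap-vz info object:

```python
class Info(object):
    """Stand-in for the bootstrap-vz info object."""
    def __init__(self):
        self.host_dependencies = {}  # command name -> providing package (or URL)

def add_required_commands(info, needs_xfs=False):
    # Unconditional dependency: debootstrap is always needed.
    info.host_dependencies['debootstrap'] = 'debootstrap'
    # Conditional dependency, mirroring the filesystem task below.
    if needs_xfs:
        info.host_dependencies['mkfs.xfs'] = 'xfsprogs'

info = Info()
add_required_commands(info, needs_xfs=True)
print(info.host_dependencies)  # {'debootstrap': 'debootstrap', 'mkfs.xfs': 'xfsprogs'}
```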
@@ -29,7 +29,7 @@ class ShredHostkeys(Task):
 		public = [path + '.pub' for path in private]

 		from common.tools import log_check_call
-		log_check_call(['/usr/bin/shred', '--remove'] + private + public)
+		log_check_call(['shred', '--remove'] + private + public)


 class CleanTMP(Task):

@@ -2,10 +2,22 @@ from base import Task
 from common import phases
 from common.tools import log_check_call
 from bootstrap import Bootstrap
-from common.tasks import apt
+import apt
+import host
 import volume


+class AddRequiredCommands(Task):
+	description = 'Adding commands required for formatting the partitions'
+	phase = phases.preparation
+	successors = [host.CheckExternalCommands]
+
+	@classmethod
+	def run(cls, info):
+		if 'xfs' in (p.filesystem for p in info.volume.partition_map.partitions):
+			info.host_dependencies['mkfs.xfs'] = 'xfsprogs'
+
+
 class Format(Task):
 	description = 'Formatting the volume'
 	phase = phases.volume_preparation
@@ -31,7 +43,7 @@ class TuneVolumeFS(Task):
 		for partition in info.volume.partition_map.partitions:
 			if not isinstance(partition, UnformattedPartition):
 				if re.match('^ext[2-4]$', partition.filesystem) is not None:
-					log_check_call(['/sbin/tune2fs', '-i', '0', partition.device_path])
+					log_check_call(['tune2fs', '-i', '0', partition.device_path])


 class AddXFSProgs(Task):

@@ -3,47 +3,29 @@ from common import phases
 from common.exceptions import TaskError


-class HostDependencies(Task):
-	description = 'Determining required host dependencies'
+class CheckExternalCommands(Task):
+	description = 'Checking availability of external commands'
 	phase = phases.preparation

-	@classmethod
-	def run(cls, info):
-		info.host_dependencies.add('debootstrap')
-
-		from common.fs.loopbackvolume import LoopbackVolume
-		if isinstance(info.volume, LoopbackVolume):
-			info.host_dependencies.add('qemu-utils')
-
-		if 'xfs' in (p.filesystem for p in info.volume.partition_map.partitions):
-			info.host_dependencies.add('xfsprogs')
-
-		from base.fs.partitionmaps.none import NoPartitions
-		if not isinstance(info.volume.partition_map, NoPartitions):
-			info.host_dependencies.update(['parted', 'kpartx'])
-
-
-class CheckHostDependencies(Task):
-	description = 'Checking installed host packages'
-	phase = phases.preparation
-	predecessors = [HostDependencies]
-
 	@classmethod
 	def run(cls, info):
 		from common.tools import log_check_call
 		from subprocess import CalledProcessError
+		import re
 		missing_packages = []
-		for package in info.host_dependencies:
+		for command, package in info.host_dependencies.items():
 			try:
-				import os.path
-				if os.path.isfile('/usr/bin/dpkg-query'):
-					log_check_call(['/usr/bin/dpkg-query', '-s', package])
+				log_check_call(['type ' + command], shell=True)
 			except CalledProcessError:
-				missing_packages.append(package)
+				if re.match('^https?:\/\/', package):
+					msg = ('The command `{command}\' is not available, '
+					       'you can download the software at `{package}\'.'
+					       .format(command=command, package=package))
+				else:
+					msg = ('The command `{command}\' is not available, '
+					       'it is located in the package `{package}\'.'
+					       .format(command=command, package=package))
+				missing_packages.append(msg)
 		if len(missing_packages) > 0:
-			pkgs = '\', `'.join(missing_packages)
-			if len(missing_packages) > 1:
-				msg = "The packages `{packages}' are not installed".format(packages=pkgs)
-			else:
-				msg = "The package `{packages}' is not installed".format(packages=pkgs)
+			msg = '\n'.join(missing_packages)
 			raise TaskError(msg)

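The availability check now asks the shell rather than dpkg: `type <command>` succeeds if the command is a binary on PATH, an alias, or a shell builtin, which also covers tools that were never installed from a Debian package. A minimal sketch of that probe outside the task framework:

```python
import subprocess

def command_available(command):
    # 'type' is a shell builtin, so the probe must run through a shell.
    # Returns True when the shell can resolve the command, False otherwise.
    return subprocess.call('type ' + command,
                           shell=True,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE) == 0

print(command_available('ls'))             # True on any sane system
print(command_available('no-such-tool'))   # False
```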
@@ -21,10 +21,10 @@ class InstallInitScripts(Task):
 			dst = os.path.join(info.root, 'etc/init.d', name)
 			copy(src, dst)
 			os.chmod(dst, rwxr_xr_x)
-			log_check_call(['/usr/sbin/chroot', info.root, '/sbin/insserv', '--default', name])
+			log_check_call(['chroot', info.root, 'insserv', '--default', name])

 		for name in info.initd['disable']:
-			log_check_call(['/usr/sbin/chroot', info.root, '/sbin/insserv', '--remove', name])
+			log_check_call(['chroot', info.root, 'insserv', '--remove', name])


 class AddExpandRoot(Task):
@@ -49,8 +49,8 @@ class AddSSHKeyGeneration(Task):
 		install = info.initd['install']
 		from subprocess import CalledProcessError
 		try:
-			log_check_call(['/usr/sbin/chroot', info.root,
-			                '/usr/bin/dpkg-query', '-W', 'openssh-server'])
+			log_check_call(['chroot', info.root,
+			                'dpkg-query', '-W', 'openssh-server'])
 			if info.manifest.system['release'] == 'squeeze':
 				install['generate-ssh-hostkeys'] = os.path.join(init_scripts_dir, 'squeeze/generate-ssh-hostkeys')
 			else:

@@ -28,12 +28,12 @@ class GenerateLocale(Task):
 		search = '# ' + locale_str
 		sed_i(locale_gen, search, locale_str)

-		log_check_call(['/usr/sbin/chroot', info.root, '/usr/sbin/locale-gen'])
+		log_check_call(['chroot', info.root, 'locale-gen'])

 		lang = '{locale}.{charmap}'.format(locale=info.manifest.system['locale'],
 		                                   charmap=info.manifest.system['charmap'])
-		log_check_call(['/usr/sbin/chroot', info.root,
-		                '/usr/sbin/update-locale', 'LANG=' + lang])
+		log_check_call(['chroot', info.root,
+		                'update-locale', 'LANG=' + lang])


 class SetTimezone(Task):

@@ -1,8 +1,25 @@
 from base import Task
 from common import phases
+import host
 import volume


+class AddRequiredCommands(Task):
+	description = 'Adding commands required for creating loopback volumes'
+	phase = phases.preparation
+	successors = [host.CheckExternalCommands]
+
+	@classmethod
+	def run(cls, info):
+		from common.fs.loopbackvolume import LoopbackVolume
+		if isinstance(info.volume, LoopbackVolume):
+			info.host_dependencies['qemu-img'] = 'qemu-utils'
+			info.host_dependencies['losetup'] = 'mount'
+		from common.fs.qemuvolume import QEMUVolume
+		if isinstance(info.volume, QEMUVolume):
+			info.host_dependencies['losetup'] = 'mount'
+
+
 class Create(Task):
 	description = 'Creating a loopback volume'
 	phase = phases.volume_creation

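For context, the two commands registered here are the ones a loopback build actually shells out to: qemu-img to allocate the raw image and losetup to expose it as a block device. A hedged sketch of that sequence; the image path and size are made-up values:

```python
from subprocess import check_call, check_output

image_path = '/target/volume.raw'  # hypothetical workspace path

# Allocate a 1 GiB raw image; 'raw' keeps it attachable via loop devices.
check_call(['qemu-img', 'create', '-f', 'raw', image_path, '1G'])

# Attach it to the first free loop device and capture the device path.
device_path = check_output(['losetup', '--show', '--find', image_path]).strip()
print(device_path)  # e.g. /dev/loop0
```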
@@ -1,19 +1,12 @@
+// This is a mapping of Debian release codenames to NIC configurations
+// Every item in an array is a line
 {
-	"squeeze": [
-		"auto lo",
-		"iface lo inet loopback",
-		"auto eth0",
-		"iface eth0 inet dhcp" ],
-	"wheezy": [
-		"auto eth0",
-		"iface eth0 inet dhcp" ],
-	"jessie": [
-		"auto eth0",
-		"iface eth0 inet dhcp" ],
-	"testing": [
-		"auto eth0",
-		"iface eth0 inet dhcp" ],
-	"unstable": [
-		"auto eth0",
-		"iface eth0 inet dhcp" ]
+	"squeeze": ["auto lo",
+	            "iface lo inet loopback",
+	            "auto eth0",
+	            "iface eth0 inet dhcp"],
+	"wheezy": ["auto eth0",
+	           "iface eth0 inet dhcp"],
+	"jessie": ["auto eth0",
+	           "iface eth0 inet dhcp"]
 }

@@ -1,6 +1,6 @@
 from base import Task
 from common import phases
-import os.path
+import os


 class RemoveDNSInfo(Task):
@@ -9,10 +9,8 @@ class RemoveDNSInfo(Task):
 	@classmethod
 	def run(cls, info):
-		from os import remove
-		import os.path
 		if os.path.isfile(os.path.join(info.root, 'etc/resolv.conf')):
-			remove(os.path.join(info.root, 'etc/resolv.conf'))
+			os.remove(os.path.join(info.root, 'etc/resolv.conf'))


 class RemoveHostname(Task):
@@ -21,10 +19,8 @@ class RemoveHostname(Task):
 	@classmethod
 	def run(cls, info):
-		from os import remove
-		import os.path
 		if os.path.isfile(os.path.join(info.root, 'etc/hostname')):
-			remove(os.path.join(info.root, 'etc/hostname'))
+			os.remove(os.path.join(info.root, 'etc/hostname'))


 class ConfigureNetworkIF(Task):
@@ -33,10 +29,10 @@ class ConfigureNetworkIF(Task):
 	@classmethod
 	def run(cls, info):
+		network_config_path = os.path.join(os.path.dirname(__file__), 'network-configuration.json')
+		from common.tools import config_get
+		if_config = config_get(network_config_path, [info.release_codename])
+
 		interfaces_path = os.path.join(info.root, 'etc/network/interfaces')
-		if_config = []
-		with open('common/tasks/network-configuration.json') as stream:
-			import json
-			if_config = json.loads(stream.read())
 		with open(interfaces_path, 'a') as interfaces:
-			interfaces.write('\n'.join(if_config.get(info.manifest.system['release'])) + '\n')
+			interfaces.write('\n'.join(if_config) + '\n')

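Taken together, the new ConfigureNetworkIF resolves the release codename through the JSON mapping above and appends the resulting lines to the image's interfaces file. A self-contained sketch of that flow with an inline stand-in for the mapping; the chroot path and codename are example values:

```python
import os

# Inline stand-in for network-configuration.json
nic_config = {'wheezy': ['auto eth0',
                         'iface eth0 inet dhcp']}

root = '/target'              # hypothetical chroot path
release_codename = 'wheezy'   # normally taken from info.release_codename

interfaces_path = os.path.join(root, 'etc/network/interfaces')
with open(interfaces_path, 'a') as interfaces:
    # Every array item in the mapping is one line of the interfaces file.
    interfaces.write('\n'.join(nic_config[release_codename]) + '\n')
```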
@@ -45,10 +45,10 @@ class InstallPackages(Task):
 		try:
 			env = os.environ.copy()
 			env['DEBIAN_FRONTEND'] = 'noninteractive'
-			log_check_call(['/usr/sbin/chroot', info.root,
-			                '/usr/bin/apt-get', 'install',
+			log_check_call(['chroot', info.root,
+			                'apt-get', 'install',
 			                '--no-install-recommends',
 			                '--assume-yes']
 			               + map(str, remote_packages),
 			               env=env)
 		except CalledProcessError as e:
@@ -90,8 +90,8 @@ class InstallPackages(Task):

 		env = os.environ.copy()
 		env['DEBIAN_FRONTEND'] = 'noninteractive'
-		log_check_call(['/usr/sbin/chroot', info.root,
-		                '/usr/bin/dpkg', '--install']
+		log_check_call(['chroot', info.root,
+		                'dpkg', '--install']
 		               + chrooted_package_paths,
 		               env=env)

@@ -1,9 +1,23 @@
 from base import Task
 from common import phases
 import filesystem
+import host
 import volume


+class AddRequiredCommands(Task):
+	description = 'Adding commands required for partitioning the volume'
+	phase = phases.preparation
+	successors = [host.CheckExternalCommands]
+
+	@classmethod
+	def run(cls, info):
+		from base.fs.partitionmaps.none import NoPartitions
+		if not isinstance(info.volume.partition_map, NoPartitions):
+			info.host_dependencies['parted'] = 'parted'
+			info.host_dependencies['kpartx'] = 'kpartx'
+
+
 class PartitionVolume(Task):
 	description = 'Partitioning the volume'
 	phase = phases.volume_preparation

@@ -10,7 +10,7 @@ class EnableShadowConfig(Task):
 	@classmethod
 	def run(cls, info):
 		from common.tools import log_check_call
-		log_check_call(['/usr/sbin/chroot', info.root, '/sbin/shadowconfig', 'on'])
+		log_check_call(['chroot', info.root, 'shadowconfig', 'on'])


 class DisableSSHPasswordAuthentication(Task):

@@ -1,14 +1,14 @@


-def log_check_call(command, stdin=None, env=None):
-	status, stdout, stderr = log_call(command, stdin, env)
+def log_check_call(command, stdin=None, env=None, shell=False):
+	status, stdout, stderr = log_call(command, stdin, env, shell)
 	if status != 0:
 		from subprocess import CalledProcessError
 		raise CalledProcessError(status, ' '.join(command), '\n'.join(stderr))
 	return stdout


-def log_call(command, stdin=None, env=None):
+def log_call(command, stdin=None, env=None, shell=False):
 	import subprocess
 	import select

@@ -22,6 +22,7 @@ def log_call(command, stdin=None, env=None):

 	popen_args = {'args': command,
 	              'env': env,
+	              'shell': shell,
 	              'stdin': subprocess.PIPE,
 	              'stdout': subprocess.PIPE,
 	              'stderr': subprocess.PIPE, }
@@ -56,3 +57,17 @@ def sed_i(file_path, pattern, subst):
 	import re
 	for line in fileinput.input(files=file_path, inplace=True):
 		print re.sub(pattern, subst, line),
+
+
+def load_json(path):
+	import json
+	from minify_json import json_minify
+	with open(path) as stream:
+		return json.loads(json_minify(stream.read(), False))
+
+
+def config_get(path, config_path):
+	config = load_json(path)
+	for key in config_path:
+		config = config.get(key)
+	return config

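`config_get` is just a keyed descent through a JSON document: load the (comment-stripped) file once, then follow a list of keys. A self-contained sketch of the same traversal without the minify_json dependency, against a made-up mapping file:

```python
import json

def config_get(path, config_path):
    # Load the JSON document and walk down one key per element of config_path.
    with open(path) as stream:
        config = json.loads(stream.read())
    for key in config_path:
        config = config.get(key)
    return config

# Hypothetical file contents: {"wheezy": {"amd64": "linux-image-amd64"}}
with open('/tmp/packages-kernels.json', 'w') as stream:
    json.dump({'wheezy': {'amd64': 'linux-image-amd64'}}, stream)

print(config_get('/tmp/packages-kernels.json', ['wheezy', 'amd64']))
# linux-image-amd64
```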
@@ -2,20 +2,20 @@
 	"provider": "kvm",
 	"bootstrapper": {
 		"workspace": "/target",
-		"mirror": "http://ftp.fr.debian.org/debian/",
-		"virtio" : [ "virtio_pci", "virtio_blk" ]
+		"mirror": "http://ftp.fr.debian.org/debian/"
 	},
 	"image": {
 		"name": "debian-{system.release}-{system.architecture}-{%y}{%m}{%d}",
 		"description": "Debian {system.release} {system.architecture}"
 	},
 	"system": {
 		"release": "wheezy",
 		"architecture": "amd64",
 		"bootloader": "grub",
 		"timezone": "UTC",
 		"locale": "en_US",
-		"charmap": "UTF-8"
+		"charmap": "UTF-8",
+		"virtio_modules": [ "virtio_pci", "virtio_blk" ]
 	},
 	"packages": {},
 	"volume": {

@@ -22,8 +22,8 @@ class CreateAdminUser(Task):
 	@classmethod
 	def run(cls, info):
 		from common.tools import log_check_call
-		log_check_call(['/usr/sbin/chroot', info.root,
-		                '/usr/sbin/useradd',
+		log_check_call(['chroot', info.root,
+		                'useradd',
 		                '--create-home', '--shell', '/bin/bash',
 		                info.manifest.plugins['admin_user']['username']])

@@ -65,8 +65,8 @@ class DisableRootLogin(Task):
 		from subprocess import CalledProcessError
 		from common.tools import log_check_call
 		try:
-			log_check_call(['/usr/sbin/chroot', info.root,
-			                '/usr/bin/dpkg-query', '-W', 'openssh-server'])
+			log_check_call(['chroot', info.root,
+			                'dpkg-query', '-W', 'openssh-server'])
 			from common.tools import sed_i
 			sshdconfig_path = os.path.join(info.root, 'etc/ssh/sshd_config')
 			sed_i(sshdconfig_path, 'PermitRootLogin yes', 'PermitRootLogin no')

@@ -68,7 +68,7 @@ class SetMetadataSource(Task):
 			logging.getLogger(__name__).warn(msg)
 			return
 		sources = "cloud-init cloud-init/datasources multiselect " + sources
-		log_check_call(['/usr/sbin/chroot', info.root, '/usr/bin/debconf-set-selections'], sources)
+		log_check_call(['chroot', info.root, 'debconf-set-selections'], sources)


 class DisableModules(Task):

@@ -5,9 +5,6 @@ def validate_manifest(data, validator, error):
 	import os.path
 	schema_path = os.path.join(os.path.dirname(__file__), 'manifest-schema.json')
 	validator(data, schema_path)
-	if 'zerofree' in data['plugins']['minimize_size']:
-		zerofree_schema_path = os.path.join(os.path.dirname(__file__), 'manifest-schema-zerofree.json')
-		validator(data, zerofree_schema_path)
 	if data['plugins']['minimize_size'].get('shrink', False) and data['volume']['backing'] != 'vmdk':
 		error('Can only shrink vmdk images', ['plugins', 'minimize_size', 'shrink'])

@@ -16,11 +13,11 @@ def resolve_tasks(taskset, manifest):
 	taskset.update([tasks.AddFolderMounts,
 	                tasks.RemoveFolderMounts,
 	                ])
-	if 'zerofree' in manifest.plugins['minimize_size']:
-		taskset.add(tasks.CheckZerofreePath)
+	if manifest.plugins['minimize_size'].get('zerofree', False):
+		taskset.add(tasks.AddRequiredCommands)
 		taskset.add(tasks.Zerofree)
 	if manifest.plugins['minimize_size'].get('shrink', False):
-		taskset.add(tasks.CheckVMWareDMCommand)
+		taskset.add(tasks.AddRequiredCommands)
 		taskset.add(tasks.ShrinkVolume)

@@ -1,18 +0,0 @@
-{
-	"$schema": "http://json-schema.org/draft-04/schema#",
-	"title": "Minimize size plugin manifest",
-	"type": "object",
-	"properties": {
-		"volume": {
-			"type": "object",
-			"properties": {
-				"partitions": {
-					"type": "object",
-					"properties": {
-						"type": { "enum": ["none"] }
-					}
-				}
-			}
-		}
-	}
-}

@@ -10,16 +10,10 @@
 				"type": "object",
 				"properties": {
 					"shrink": { "type": "boolean" },
-					"zerofree": { "$ref": "#/definitions/absolute_path" }
+					"zerofree": { "type": "boolean" }
 				}
 			}
 		}
-	},
-	"definitions": {
-		"absolute_path": {
-			"type": "string",
-			"pattern": "^/[^\\0]+$"
-		}
 	}
 }

@@ -3,6 +3,7 @@ from common import phases
 from common.tasks import apt
 from common.tasks import bootstrap
 from common.tasks import filesystem
+from common.tasks import host
 from common.tasks import partitioning
 from common.tasks import volume
 import os
@@ -46,48 +47,30 @@ class RemoveFolderMounts(Task):
 		del info.minimize_size_folder


-class CheckZerofreePath(Task):
-	description = 'Checking path to zerofree tool'
+class AddRequiredCommands(Task):
+	description = 'Adding commands required for reducing volume size'
 	phase = phases.preparation
+	successors = [host.CheckExternalCommands]

 	@classmethod
 	def run(cls, info):
-		from common.exceptions import TaskError
-		import os
-		zerofree = info.manifest.plugins['minimize_size']['zerofree']
-		if not os.path.isfile(zerofree):
-			raise TaskError('The path `{path}\' does not exist or is not a file'.format(path=zerofree))
-		if not os.access(zerofree, os.X_OK):
-			raise TaskError('The path `{path}\' is not executable'.format(path=zerofree))
+		if info.manifest.plugins['minimize_size'].get('zerofree', False):
+			info.host_dependencies['zerofree'] = 'zerofree'
+		if info.manifest.plugins['minimize_size'].get('shrink', False):
+			link = 'https://my.vmware.com/web/vmware/info/slug/desktop_end_user_computing/vmware_workstation/10_0'
+			info.host_dependencies['vmware-vdiskmanager'] = link


-# Get zerofree here: http://intgat.tigress.co.uk/rmy/uml/index.html
 class Zerofree(Task):
-	description = 'Zeroing unused blocks on the volume'
+	description = 'Zeroing unused blocks on the root partition'
 	phase = phases.volume_unmounting
-	predecessors = [filesystem.UnmountRoot, partitioning.UnmapPartitions]
-	successors = [volume.Detach]
+	predecessors = [filesystem.UnmountRoot]
+	successors = [partitioning.UnmapPartitions, volume.Detach]

 	@classmethod
 	def run(cls, info):
 		from common.tools import log_check_call
-		zerofree = info.manifest.plugins['minimize_size']['zerofree']
-		log_check_call([zerofree, info.volume.device_path])
-
-
-class CheckVMWareDMCommand(Task):
-	description = 'Checking path to vmware-vdiskmanager tool'
-	phase = phases.preparation
-
-	@classmethod
-	def run(cls, info):
-		from common.exceptions import TaskError
-		import os
-		vdiskmngr = '/usr/bin/vmware-vdiskmanager'
-		if not os.path.isfile(vdiskmngr):
-			raise TaskError('Unable to find vmware-vdiskmanager at `{path}\''.format(path=vdiskmngr))
-		if not os.access(vdiskmngr, os.X_OK):
-			raise TaskError('vmware-vdiskmanager at `{path}\' is not executable'.format(path=vdiskmngr))
+		log_check_call(['zerofree', info.volume.partition_map.root.device_path])


 class ShrinkVolume(Task):

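The reordering matters: zerofree must see the root filesystem unmounted but the partition mapping still in place, so the task now runs after UnmountRoot and before UnmapPartitions. A hedged sketch of the resulting sequence, with the device path as a made-up value:

```python
from subprocess import check_call

root_device = '/dev/mapper/loop0p1'  # hypothetical mapped root partition

# 1. The filesystem must be unmounted before zerofree may touch it.
check_call(['umount', root_device])
# 2. Zero all unused ext2/3/4 blocks so the image compresses well.
check_call(['zerofree', root_device])
# 3. Only now may the partition mapping be torn down (kpartx, detach, etc.).
```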
@@ -8,9 +8,10 @@ def validate_manifest(data, validator, error):


 def resolve_tasks(taskset, manifest):
-	taskset.add(tasks.CheckPaths)
 	taskset.add(tasks.AddPackages)
 	if 'assets' in manifest.plugins['puppet']:
+		taskset.add(tasks.CheckAssetsPath)
 		taskset.add(tasks.CopyPuppetAssets)
 	if 'manifest' in manifest.plugins['puppet']:
+		taskset.add(tasks.CheckManifestPath)
 		taskset.add(tasks.ApplyPuppetManifest)

@@ -5,8 +5,8 @@ from common.tasks import network
 import os


-class CheckPaths(Task):
-	description = 'Checking whether manifest and assets paths exist'
+class CheckAssetsPath(Task):
+	description = 'Checking whether the assets path exists'
 	phase = phases.preparation

 	@classmethod
@@ -20,6 +20,14 @@ class CheckPaths(Task):
 			msg = 'The assets path {assets} does not point to a directory.'.format(assets=assets)
 			raise TaskError(msg)

+
+class CheckManifestPath(Task):
+	description = 'Checking whether the manifest path exists'
+	phase = phases.preparation
+
+	@classmethod
+	def run(cls, info):
+		from common.exceptions import TaskError
 		manifest = info.manifest.plugins['puppet']['manifest']
 		if not os.path.exists(manifest):
 			msg = 'The manifest file {manifest} does not exist.'.format(manifest=manifest)
@@ -84,10 +92,20 @@ class ApplyPuppetManifest(Task):

 		manifest_path = os.path.join('/', manifest_rel_dst)
 		from common.tools import log_check_call
-		log_check_call(['/usr/sbin/chroot', info.root,
-		                '/usr/bin/puppet', 'apply', manifest_path])
+		log_check_call(['chroot', info.root,
+		                'puppet', 'apply', manifest_path])
 		os.remove(manifest_dst)

 		from common.tools import sed_i
 		hosts_path = os.path.join(info.root, 'etc/hosts')
 		sed_i(hosts_path, '127.0.0.1\s*{hostname}\n?'.format(hostname=hostname), '')
+
+
+class EnableAgent(Task):
+	description = 'Enabling the puppet agent'
+	phase = phases.system_modification
+
+	@classmethod
+	def run(cls, info):
+		from common.tools import sed_i
+		puppet_defaults = os.path.join(info.root, 'etc/defaults/puppet')
+		sed_i(puppet_defaults, 'START=no', 'START=yes')

@@ -9,5 +9,5 @@ class SetRootPassword(Task):
 	@classmethod
 	def run(cls, info):
 		from common.tools import log_check_call
-		log_check_call(['/usr/sbin/chroot', info.root, '/usr/sbin/chpasswd'],
+		log_check_call(['chroot', info.root, 'chpasswd'],
 		               'root:' + info.manifest.plugins['root_password']['password'])

@@ -71,8 +71,8 @@ class CreateVagrantUser(Task):
 	@classmethod
 	def run(cls, info):
 		from common.tools import log_check_call
-		log_check_call(['/usr/sbin/chroot', info.root,
-		                '/usr/sbin/useradd',
+		log_check_call(['chroot', info.root,
+		                'useradd',
 		                '--create-home', '--shell', '/bin/bash',
 		                'vagrant'])

@@ -115,8 +115,8 @@ class AddInsecurePublicKey(Task):

 		# We can't do this directly with python, since getpwnam gets its info from the host
 		from common.tools import log_check_call
-		log_check_call(['/usr/sbin/chroot', info.root,
-		                '/bin/chown', 'vagrant:vagrant',
+		log_check_call(['chroot', info.root,
+		                'chown', 'vagrant:vagrant',
 		                '/home/vagrant/.ssh', '/home/vagrant/.ssh/authorized_keys'])


@@ -127,7 +127,7 @@ class SetRootPassword(Task):
 	@classmethod
 	def run(cls, info):
 		from common.tools import log_check_call
-		log_check_call(['/usr/sbin/chroot', info.root, '/usr/sbin/chpasswd'], 'root:vagrant')
+		log_check_call(['chroot', info.root, 'chpasswd'], 'root:vagrant')


 class PackageBox(Task):

@@ -61,7 +61,7 @@ def resolve_tasks(taskset, manifest):
 	if manifest.volume['partitions']['type'] != 'none':
 		taskset.update(common.task_sets.partitioning_set)

-	taskset.update([tasks.host.HostDependencies,
+	taskset.update([tasks.host.AddExternalCommands,
 	                tasks.packages.DefaultPackages,
 	                tasks.connection.GetCredentials,
 	                tasks.host.GetInfo,
@@ -97,7 +97,8 @@ def resolve_tasks(taskset, manifest):
 	                tasks.ebs.Attach,
 	                filesystem.FStab,
 	                tasks.ebs.Snapshot],
-	        's3': [loopback.Create,
+	        's3': [loopback.AddRequiredCommands,
+	               loopback.Create,
 	               volume.Attach,
 	               tasks.filesystem.S3FStab,
 	               tasks.ami.BundleImage,

providers/ec2/tasks/ami-akis.json (new file, 34 lines)
@@ -0,0 +1,34 @@
+// This is a mapping of EC2 regions to processor architectures to Amazon Kernel Images
+// Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html#AmazonKernelImageIDs
+{
+	"ap-northeast-1": // Asia Pacific (Tokyo) Region
+		{"i386": "aki-136bf512",   // pv-grub-hd0_1.04-i386.gz
+		 "amd64": "aki-176bf516"}, // pv-grub-hd0_1.04-x86_64.gz
+	"ap-southeast-1": // Asia Pacific (Singapore) Region
+		{"i386": "aki-ae3973fc",   // pv-grub-hd0_1.04-i386.gz
+		 "amd64": "aki-503e7402"}, // pv-grub-hd0_1.04-x86_64.gz
+	"ap-southeast-2": // Asia Pacific (Sydney) Region
+		{"i386": "aki-cd62fff7",   // pv-grub-hd0_1.04-i386.gz
+		 "amd64": "aki-c362fff9"}, // pv-grub-hd0_1.04-x86_64.gz
+	"eu-west-1": // EU (Ireland) Region
+		{"i386": "aki-68a3451f",   // pv-grub-hd0_1.04-i386.gz
+		 "amd64": "aki-52a34525"}, // pv-grub-hd0_1.04-x86_64.gz
+	"sa-east-1": // South America (Sao Paulo) Region
+		{"i386": "aki-5b53f446",   // pv-grub-hd0_1.04-i386.gz
+		 "amd64": "aki-5553f448"}, // pv-grub-hd0_1.04-x86_64.gz
+	"us-east-1": // US East (Northern Virginia) Region
+		{"i386": "aki-8f9dcae6",   // pv-grub-hd0_1.04-i386.gz
+		 "amd64": "aki-919dcaf8"}, // pv-grub-hd0_1.04-x86_64.gz
+	"us-gov-west-1": // AWS GovCloud (US)
+		{"i386": "aki-1fe98d3c",   // pv-grub-hd0_1.04-i386.gz
+		 "amd64": "aki-1de98d3e"}, // pv-grub-hd0_1.04-x86_64.gz
+	"us-west-1": // US West (Northern California) Region
+		{"i386": "aki-8e0531cb",   // pv-grub-hd0_1.04-i386.gz
+		 "amd64": "aki-880531cd"}, // pv-grub-hd0_1.04-x86_64.gz
+	"us-west-2": // US West (Oregon) Region
+		{"i386": "aki-f08f11c0",   // pv-grub-hd0_1.04-i386.gz
+		 "amd64": "aki-fc8f11cc"}, // pv-grub-hd0_1.04-x86_64.gz
+	"cn-north-1": // China North (Beijing) Region
+		{"i386": "aki-908f1da9",   // pv-grub-hd0_1.04-i386.gz
+		 "amd64": "aki-9e8f1da7"}  // pv-grub-hd0_1.04-x86_64.gz
+}

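With the mapping externalized, the kernel lookup in RegisterAMI reduces to a two-key descent. A minimal sketch using a trimmed inline copy of the table; the region and architecture values are examples:

```python
# Inline stand-in for ami-akis.json (comments stripped)
akis = {
    'us-east-1': {'i386': 'aki-8f9dcae6', 'amd64': 'aki-919dcaf8'},
    'eu-west-1': {'i386': 'aki-68a3451f', 'amd64': 'aki-52a34525'},
}

region = 'us-east-1'        # normally info.host['region']
architecture = 'amd64'      # normally info.manifest.system['architecture']
kernel_id = akis[region][architecture]
print(kernel_id)  # aki-919dcaf8
```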
@@ -38,8 +38,10 @@ class BundleImage(Task):
 	def run(cls, info):
 		bundle_name = 'bundle-{id}'.format(id=info.run_id)
 		info.bundle_path = os.path.join(info.workspace, bundle_name)
-		log_check_call(['/usr/bin/euca-bundle-image',
+		arch = {'i386': 'i386', 'amd64': 'x86_64'}.get(info.manifest.system['architecture'])
+		log_check_call(['euca-bundle-image',
 		                '--image', info.volume.image_path,
+		                '--arch', arch,
 		                '--user', info.credentials['user-id'],
 		                '--privatekey', info.credentials['private-key'],
 		                '--cert', info.credentials['certificate'],
@@ -63,7 +65,7 @@ class UploadImage(Task):
 		else:
 			s3_url = 'https://s3-{region}.amazonaws.com/'.format(region=info.host['region'])
 		info.manifest.manifest_location = info.manifest.image['bucket'] + '/' + info.ami_name + '.manifest.xml'
-		log_check_call(['/usr/bin/euca-upload-bundle',
+		log_check_call(['euca-upload-bundle',
 		                '--bucket', info.manifest.image['bucket'],
 		                '--manifest', manifest_file,
 		                '--access-key', info.credentials['access-key'],
@@ -90,48 +92,6 @@ class RegisterAMI(Task):
 	phase = phases.image_registration
 	predecessors = [Snapshot, UploadImage]

-	# Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html#AmazonKernelImageIDs
-	kernel_mapping = {'ap-northeast-1':   # Asia Pacific (Tokyo) Region
-	                  {'i386': 'aki-136bf512',    # pv-grub-hd0_1.04-i386.gz
-	                   'amd64': 'aki-176bf516'},  # pv-grub-hd0_1.04-x86_64.gz
-	                  'ap-southeast-1':   # Asia Pacific (Singapore) Region
-	                  {'i386': 'aki-ae3973fc',
-	                   'amd64': 'aki-503e7402'},
-	                  'ap-southeast-2':   # Asia Pacific (Sydney) Region
-	                  {'i386': 'aki-cd62fff7',
-	                   'amd64': 'aki-c362fff9'},
-	                  'eu-west-1':        # EU (Ireland) Region
-	                  {'i386': 'aki-68a3451f',
-	                   'amd64': 'aki-52a34525'},
-	                  'sa-east-1':        # South America (Sao Paulo) Region
-	                  {'i386': 'aki-5b53f446',
-	                   'amd64': 'aki-5553f448'},
-	                  'us-east-1':        # US East (Northern Virginia) Region
-	                  {'i386': 'aki-8f9dcae6',
-	                   'amd64': 'aki-919dcaf8'},
-	                  'us-gov-west-1':    # AWS GovCloud (US)
-	                  {'i386': 'aki-1fe98d3c',
-	                   'amd64': 'aki-1de98d3e'},
-	                  'us-west-1':        # US West (Northern California) Region
-	                  {'i386': 'aki-8e0531cb',
-	                   'amd64': 'aki-880531cd'},
-	                  'us-west-2':        # US West (Oregon) Region
-	                  {'i386': 'aki-f08f11c0',
-	                   'amd64': 'aki-fc8f11cc'},
-	                  'cn-north-1':       # China North (Beijing) Region
-	                  {'i386': 'aki-908f1da9',
-	                   'amd64': 'aki-9e8f1da7'}
-	                  }
 	@classmethod
 	def run(cls, info):
 		registration_params = {'name': info.ami_name,
@@ -140,17 +100,11 @@ class RegisterAMI(Task):
 		                                 'amd64': 'x86_64'}.get(info.manifest.system['architecture'])

 		if info.manifest.volume['backing'] == 's3':
-			grub_boot_device = 'hd0'
 			registration_params['image_location'] = info.manifest.manifest_location
 		else:
 			root_dev_name = {'pvm': '/dev/sda',
 			                 'hvm': '/dev/xvda'}.get(info.manifest.data['virtualization'])
 			registration_params['root_device_name'] = root_dev_name
-			from base.fs.partitionmaps.none import NoPartitions
-			if isinstance(info.volume.partition_map, NoPartitions):
-				grub_boot_device = 'hd0'
-			else:
-				grub_boot_device = 'hd00'

 			from boto.ec2.blockdevicemapping import BlockDeviceType
 			from boto.ec2.blockdevicemapping import BlockDeviceMapping
@@ -163,8 +117,9 @@ class RegisterAMI(Task):
 			registration_params['virtualization_type'] = 'hvm'
 		else:
 			registration_params['virtualization_type'] = 'paravirtual'
-			registration_params['kernel_id'] = (cls.kernel_mapping
-			                                    .get(info.host['region'])
-			                                    .get(info.manifest.system['architecture']))
+			akis_path = os.path.join(os.path.dirname(__file__), 'ami-akis.json')
+			from common.tools import config_get
+			registration_params['kernel_id'] = config_get(akis_path, [info.host['region'],
+			                                                          info.manifest.system['architecture']])

 		info.image = info.connection.register_image(**registration_params)

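For orientation, the parameters assembled above feed a single boto call. A hedged sketch of roughly what an S3-backed paravirtual registration amounts to; the connection setup, names, and IDs below are placeholder values, not taken from this commit:

```python
from boto.ec2 import connect_to_region

# Hypothetical credentials and values; bootstrap-vz takes these from info.*
connection = connect_to_region('us-east-1')

image_id = connection.register_image(
    name='debian-wheezy-amd64-example',
    architecture='x86_64',
    virtualization_type='paravirtual',
    kernel_id='aki-919dcaf8',  # pv-grub AKI for us-east-1/amd64
    image_location='my-bucket/debian-wheezy-amd64-example.manifest.xml')
print(image_id)
```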
@@ -45,6 +45,6 @@ class ConfigurePVGrub(Task):
 			'GRUB_HIDDEN_TIMEOUT=true')

 		from common.tools import log_check_call
-		log_check_call(['/usr/sbin/chroot', info.root, '/usr/sbin/update-grub'])
-		log_check_call(['/usr/sbin/chroot', info.root,
-		                '/bin/ln', '--symbolic', '/boot/grub/grub.cfg', '/boot/grub/menu.lst'])
+		log_check_call(['chroot', info.root, 'update-grub'])
+		log_check_call(['chroot', info.root,
+		                'ln', '--symbolic', '/boot/grub/grub.cfg', '/boot/grub/menu.lst'])

@@ -3,15 +3,16 @@ from common import phases
 from common.tasks import host


-class HostDependencies(Task):
-	description = 'Adding required host packages for EC2 bootstrapping'
+class AddExternalCommands(Task):
+	description = 'Determining required external commands for EC2 bootstrapping'
 	phase = phases.preparation
-	successors = [host.CheckHostDependencies]
+	successors = [host.CheckExternalCommands]

 	@classmethod
 	def run(cls, info):
 		if info.manifest.volume['backing'] == 's3':
-			info.host_dependencies.add('euca2ools')
+			info.host_dependencies['euca-bundle-image'] = 'euca2ools'
+			info.host_dependencies['euca-upload-bundle'] = 'euca2ools'


 class GetInfo(Task):

@@ -40,17 +40,17 @@ class InstallEnhancedNetworking(Task):
 		urllib.urlretrieve(drivers_url, archive)

 		from common.tools import log_check_call
-		log_check_call(['/bin/tar', '--ungzip',
+		log_check_call(['tar', '--ungzip',
 		                '--extract',
 		                '--file', archive,
 		                '--directory', os.path.join(info.root, 'tmp')])

 		src_dir = os.path.join('/tmp', os.path.basename(drivers_url), 'src')
-		log_check_call(['/usr/sbin/chroot', info.root,
-		                '/usr/bin/make', '--directory', src_dir])
-		log_check_call(['/usr/sbin/chroot', info.root,
-		                '/usr/bin/make', 'install',
+		log_check_call(['chroot', info.root,
+		                'make', '--directory', src_dir])
+		log_check_call(['chroot', info.root,
+		                'make', 'install',
 		                '--directory', src_dir])

 		ixgbevf_conf_path = os.path.join(info.root, 'etc/modprobe.d/ixgbevf.conf')
 		with open(ixgbevf_conf_path, 'w') as ixgbevf_conf:

@@ -1,17 +1,12 @@
+// This is a mapping of Debian release codenames to processor architectures to kernel packages
 {
-	"squeeze": {
-		"amd64": "linux-image-xen-amd64",
-		"i386" : "linux-image-xen-686" },
-	"wheezy": {
-		"amd64": "linux-image-amd64",
-		"i386" : "linux-image-686" },
-	"jessie": {
-		"amd64": "linux-image-amd64",
-		"i386" : "linux-image-686" },
-	"testing": {
-		"amd64": "linux-image-amd64",
-		"i386" : "linux-image-686" },
-	"unstable": {
-		"amd64": "linux-image-amd64",
-		"i386" : "linux-image-686" }
+	"squeeze": // In squeeze, we need a special kernel flavor for xen
+		{"i386": "linux-image-xen-686",
+		 "amd64": "linux-image-xen-amd64"},
+	"wheezy":
+		{"i386": "linux-image-686",
+		 "amd64": "linux-image-amd64"},
+	"jessie":
+		{"i386": "linux-image-686",
+		 "amd64": "linux-image-amd64"}
 }

@@ -17,10 +17,9 @@ class DefaultPackages(Task):
 		info.exclude_packages.add('isc-dhcp-client')
 		info.exclude_packages.add('isc-dhcp-common')

-		# In squeeze, we need a special kernel flavor for xen
-		kernels = {}
-		with open('providers/ec2/tasks/packages-kernels.json') as stream:
-			import json
-			kernels = json.loads(stream.read())
-		kernel_package = kernels.get(info.manifest.system['release']).get(info.manifest.system['architecture'])
+		import os.path
+		kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.json')
+		from common.tools import config_get
+		kernel_package = config_get(kernel_packages_path, [info.release_codename,
+		                                                   info.manifest.system['architecture']])
 		info.packages.add(kernel_package)

@@ -3,21 +3,22 @@
 	"title": "KVM manifest",
 	"type": "object",
 	"properties": {
-		"bootstrapper": {
+		"system": {
 			"type": "object",
 			"properties": {
 				"virtio": {
 					"type": "array",
 					"items": {
-						"type": "string"
+						"type": "string",
+						"enum": ["virtio",
+						         "virtio_pci",
+						         "virtio_balloon",
+						         "virtio_blk",
+						         "virtio_net",
+						         "virtio_ring"]
 					},
 					"minItems": 1
-				}
-			}
-		},
-		"system": {
-			"type": "object",
-			"properties": {
+				},
 				"bootloader": {
 					"type": "string",
 					"enum": ["grub", "extlinux"]

@@ -12,5 +12,5 @@ class VirtIO(Task):
 		modules = os.path.join(info.root, '/etc/initramfs-tools/modules')
 		with open(modules, "a") as modules_file:
 			modules_file.write("\n")
-			for module in info.manifest.bootstrapper.get('virtio', []):
-				modules_file.write(module+"\n")
+			for module in info.manifest.system.get('virtio', []):
+				modules_file.write(module + "\n")

@@ -29,8 +29,8 @@ class AddGuestAdditionsPackages(Task):
 		info.packages.add('dkms')

 		from common.tools import log_check_call
-		[kernel_version] = log_check_call(['/usr/sbin/chroot', info.root,
-		                                   '/bin/uname', '-r'])
+		[kernel_version] = log_check_call(['chroot', info.root,
+		                                   'uname', '-r'])
 		kernel_headers_pkg = 'linux-headers-{version}'.format(version=kernel_version)
 		info.packages.add(kernel_headers_pkg)

@@ -52,7 +52,7 @@ class InstallGuestAdditions(Task):

 		install_script = os.path.join('/', mount_dir, 'VBoxLinuxAdditions.run')
 		from common.tools import log_call
-		status, out, err = log_call(['/usr/sbin/chroot', info.root,
+		status, out, err = log_call(['chroot', info.root,
 		                             install_script, '--nox11'])
 		# Install will exit with $?=1 because X11 isn't installed
 		if status != 1:

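The guest-additions installer is a case where a nonzero exit code is expected (it returns 1 because X11 is absent in the image), so the task uses log_call and judges the status itself instead of log_check_call. A minimal sketch of that pattern with plain subprocess; the script path below is a placeholder:

```python
import subprocess

def run_expecting_status(command, expected_status):
    # Like log_call: capture output, but let the caller judge the exit code.
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    out, err = process.communicate()
    if process.returncode != expected_status:
        raise RuntimeError('unexpected exit status {0}: {1}'
                           .format(process.returncode, err))
    return out

# Hypothetical: the installer exits 1 when no X11 is present, which is fine.
# run_expecting_status(['chroot', '/target',
#                       '/mnt/vbox/VBoxLinuxAdditions.run', '--nox11'], 1)
```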