pull latest updates and fix merge for opennebula modifications

Olivier Sallou 2014-03-25 14:38:08 +01:00
commit c53e7bd467
72 changed files with 1225 additions and 409 deletions

View file

@ -1,12 +0,0 @@
# Coding standards #
* Specify the full path when invoking a command.
* Use long options whenever possible; this makes the commands invoked a lot easier to understand.
* Use tabs for indentation and spaces for alignment.
* Max line length is 110 chars.
* Multiple assignments may be aligned.
* Follow PEP8 with the exception of the following rules
* E101: Indenting with tabs and aligning with spaces
* E221: Alignment of assignments
* E241: Alignment of assignments
* E501: The line length is 110 characters not 80
* W191: We indent with tabs not spaces
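For a concrete feel of these rules, here is a short hypothetical snippet that follows them: a tab indents the function body, while spaces align the assignments.

```python
def load_defaults(manifest):
	# Indented with one tab; the two assignments are aligned with spaces.
	mirror  = manifest.get('mirror', 'http://http.debian.net/debian')
	release = manifest.get('release', 'wheezy')
	return mirror, release
```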

View file

@ -1,31 +1,20 @@
bootstrap-vz
===========================================
bootstrap-vz is a fully automated bootstrapping tool for Debian.
It creates images for various virtualized platforms (at the moment: kvm, virtualbox, ec2).
The plugin architecture allows for heavy modification of standard behavior
(e.g. create a vagrant box, apply puppet manifests, run custom shell commands).
bootstrap-vz is a bootstrapping framework for Debian.
It is specifically targeted at bootstrapping systems for virtualized environments.
bootstrap-vz runs without any user intervention and generates ready-to-boot images for
[a number of virtualization platforms](http://andsens.github.io/bootstrap-vz/providers.html).
Its aim is to provide a reproducible bootstrapping process using [manifests](http://andsens.github.io/bootstrap-vz/manifest.html) as well as supporting a high degree of customizability through plugins.
At no time is the resulting image booted, meaning there are no latent logfiles
or bash_history files.
bootstrap-vz was coded from scratch in python once the bash script architecture that was used in the
[build-debian-cloud](https://github.com/andsens/build-debian-cloud) bootstrapper reached its
limits.
The bootstrapper runs on a single json manifest file which contains all configurable
parameters. This allows you to recreate the image whenever you like so you can create
an updated version of an existing image or create the same image in multiple EC2 regions.
Dependencies
------------
You will need to run Debian wheezy with **python 2.7** and **debootstrap** installed.
Other dependencies include:
* qemu-utils
* parted
* grub2
* euca2ools
* xfsprogs (If you want to use XFS as a filesystem)
Also the following python libraries are required:
* **boto** ([version 2.14.0 or higher](https://github.com/boto/boto))
* **jsonschema** ([version 2.0.0](https://pypi.python.org/pypi/jsonschema), only available through pip)
* **termcolor**
* **fysom**
Bootstrapping instance store AMIs requires **euca2ools** to be installed.
Documentation
-------------
The documentation for bootstrap-vz is available
at [andsens.github.io/bootstrap-vz](http://andsens.github.io/bootstrap-vz).
There, you can discover [what the dependencies](http://andsens.github.io/bootstrap-vz/#dependencies)
for a specific cloud provider are, [see a list of available plugins](http://andsens.github.io/bootstrap-vz/plugins.html)
and learn [how you create a manifest](http://andsens.github.io/bootstrap-vz/manifest.html).
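To give a feel for what a manifest looks like, here is a minimal sketch assembled only from fields referenced elsewhere in this commit (the values are illustrative, and a real manifest may require additional keys):

```json
{ // comments like this one are stripped by the loader before parsing
	"provider": "virtualbox",
	"bootstrapper": { "workspace": "/target" },
	"system": { "release": "wheezy", "bootloader": "grub" },
	"packages": { "mirror": "http://http.debian.net/debian" },
	"volume": {
		"backing": "vdi",
		"partitions": {
			"type": "msdos",
			"root": { "size": "4GiB", "filesystem": "ext4" }
		}
	}
}
```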

View file

@ -5,6 +5,13 @@ from main import main
def validate_manifest(data, validator, error):
"""Validates the manifest using the base manifest
Args:
data (dict): The data of the manifest
validator (function): The function that validates the manifest given the data and a path
error (function): The function that raises an error when the validation fails
"""
import os.path
schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json'))
validator(data, schema_path)

View file

@ -1,22 +1,46 @@
class BootstrapInformation(object):
"""The BootstrapInformation class holds all information about the bootstrapping process.
The nature of the attributes of this class is rather diverse.
Tasks may set their own attributes on this class for later retrieval by another task.
Information that becomes invalid (e.g. a path to a file that has been deleted) must be removed.
"""
def __init__(self, manifest=None, debug=False):
"""Instantiates a new bootstrap info object.
Args:
manifest (Manifest): The manifest
debug (bool): Whether debugging is turned on
"""
# Set the manifest attribute.
self.manifest = manifest
self.debug = debug
# Create a run_id. This id may be used to uniquely identify the current bootstrapping process
import random
self.run_id = '{id:08x}'.format(id=random.randrange(16 ** 8))
# Define the path to our workspace
import os.path
self.workspace = os.path.join(manifest.bootstrapper['workspace'], self.run_id)
# Load all the volume information
from fs import load_volume
self.volume = load_volume(self.manifest.volume, manifest.system['bootloader'])
# The default apt mirror
self.apt_mirror = self.manifest.packages.get('mirror', 'http://http.debian.net/debian')
# Normalize the release codenames so that tasks may query for release codenames rather than
# 'stable', 'unstable' etc. This is useful when handling cases that are specific to a release.
release_codenames_path = os.path.join(os.path.dirname(__file__), 'release-codenames.json')
from common.tools import config_get
self.release_codename = config_get(release_codenames_path, [self.manifest.system['release']])
class DictClass(dict):
"""Tiny extension of dict to allow setting and getting keys via attributes
"""
def __getattr__(self, name):
return self[name]
@ -24,18 +48,29 @@ class BootstrapInformation(object):
self[name] = value
def set_manifest_vars(obj, data):
"""Runs through the manifest and creates DictClasses for every key
Args:
obj (dict): dictionary to set the values on
data (dict): dictionary of values to set on the obj
"""
for key, value in data.iteritems():
if isinstance(value, dict):
obj[key] = DictClass()
set_manifest_vars(obj[key], value)
continue
# Lists are not supported
if not isinstance(value, list):
obj[key] = value
# manifest_vars is a dictionary of all the manifest values,
# with it users can cross-reference values in the manifest, so that they do not need to be written twice
self.manifest_vars = {}
self.manifest_vars['apt_mirror'] = self.apt_mirror
set_manifest_vars(self.manifest_vars, self.manifest.data)
# Populate the manifest_vars with datetime information
# and map the datetime variables directly to the dictionary
from datetime import datetime
now = datetime.now()
time_vars = ['%a', '%A', '%b', '%B', '%c', '%d', '%f', '%H',
@ -44,13 +79,23 @@ class BootstrapInformation(object):
for key in time_vars:
self.manifest_vars[key] = now.strftime(key)
# Keep a list of apt sources,
# so that tasks may add to that list without having to fiddle with apt source list files.
from pkg.sourceslist import SourceLists
self.source_lists = SourceLists(self.manifest_vars)
# Keep a list of packages that should be installed, tasks can add and remove things from this list
from pkg.packagelist import PackageList
self.packages = PackageList(self.manifest_vars, self.source_lists)
# These sets should rarely be used; they specify which packages the
# debootstrap invocation should be called with.
self.include_packages = set()
self.exclude_packages = set()
self.host_dependencies = set()
# Dictionary to specify which commands are required on the host.
# The keys are commands, while the values are either package names or urls
# that hint at how a command may be made available.
self.host_dependencies = {}
# Lists of startup scripts that should be installed and disabled
self.initd = {'install': {}, 'disable': []}
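A sketch of how a task might use these attributes (all command, package and script names below are illustrative):

```python
# Inside some task's run(cls, info):
info.host_dependencies['kpartx'] = 'kpartx'   # command -> package (or URL) that provides it
info.include_packages.add('openssh-server')   # extra package for the debootstrap invocation
info.initd['disable'].append('hwclock.sh')    # startup script that should be disabled
```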

View file

@ -1,10 +1,19 @@
def load_volume(data, bootloader):
"""Instantiates a volume that corresponds to the data in the manifest
Args:
data (dict): The 'volume' section from the manifest
bootloader (str): Name of the bootloader the system will boot with
Returns:
Volume. The volume that represents all information pertaining to the volume we bootstrap on
"""
from common.fs.loopbackvolume import LoopbackVolume
from providers.ec2.ebsvolume import EBSVolume
from common.fs.virtualdiskimage import VirtualDiskImage
from common.fs.virtualmachinedisk import VirtualMachineDisk
# Create a mapping between valid partition maps in the manifest and their corresponding classes
from partitionmaps.gpt import GPTPartitionMap
from partitionmaps.msdos import MSDOSPartitionMap
from partitionmaps.none import NoPartitions
@ -12,11 +21,14 @@ def load_volume(data, bootloader):
'gpt': GPTPartitionMap,
'msdos': MSDOSPartitionMap,
}
# Instantiate the partition map
partition_map = partition_maps.get(data['partitions']['type'])(data['partitions'], bootloader)
# Create a mapping between valid volume backings in the manifest and their corresponding classes
volume_backings = {'raw': LoopbackVolume,
's3': LoopbackVolume,
'vdi': VirtualDiskImage,
'vmdk': VirtualMachineDisk,
'ebs': EBSVolume
}
# Create the volume with the partition map as an argument
return volume_backings.get(data['backing'])(partition_map)
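A usage sketch, mirroring the call site in BootstrapInformation above:

```python
# bootstrapinfo.py does essentially this:
volume = load_volume(manifest.volume, manifest.system['bootloader'])
```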

View file

@ -1,8 +1,12 @@
class VolumeError(Exception):
"""Raised when an error occurs while interacting with the volume
"""
pass
class PartitionError(Exception):
"""Raised when an error occurs while interacting with the partitions on the volume
"""
pass

View file

@ -6,25 +6,50 @@ from ..exceptions import PartitionError
class AbstractPartitionMap(FSMProxy):
"""Abstract representation of a partiton map
This class is a finite state machine and represents the state of the real partition map
"""
__metaclass__ = ABCMeta
# States the partition map can be in
events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'},
{'name': 'map', 'src': 'unmapped', 'dst': 'mapped'},
{'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'},
]
def __init__(self, bootloader):
"""
Args:
bootloader (str): Name of the bootloader we will use for bootstrapping
"""
# Create the configuration for the state machine
cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': {}}
super(AbstractPartitionMap, self).__init__(cfg)
def is_blocking(self):
"""Returns whether the partition map is blocking volume detach operations
Returns:
bool.
"""
return self.fsm.current == 'mapped'
def get_total_size(self):
"""Returns the total size the partitions occupy
Returns:
Bytes. The size of all the partitions
"""
# We just need the endpoint of the last partition
return self.partitions[-1].get_end()
def create(self, volume):
"""Creates the partition map
Args:
volume (Volume): The volume to create the partition map on
"""
self.fsm.create(volume=volume)
@abstractmethod
@ -32,19 +57,30 @@ class AbstractPartitionMap(FSMProxy):
pass
def map(self, volume):
"""Maps the partition map to device nodes
Args:
volume (Volume): The volume the partition map resides on
"""
self.fsm.map(volume=volume)
def _before_map(self, event):
"""
Raises:
PartitionError
"""
volume = event.volume
try:
mappings = log_check_call(['/sbin/kpartx', '-l', volume.device_path])
# Ask kpartx how the partitions will be mapped before actually attaching them.
mappings = log_check_call(['kpartx', '-l', volume.device_path])
import re
regexp = re.compile('^(?P<name>.+[^\d](?P<p_idx>\d+)) : '
'(?P<start_blk>\d) (?P<num_blks>\d+) '
'{device_path} (?P<blk_offset>\d+)$'
.format(device_path=volume.device_path))
log_check_call(['/sbin/kpartx', '-a', volume.device_path])
log_check_call(['kpartx', '-a', volume.device_path])
import os.path
# Run through the kpartx output and map the paths to the partitions
for mapping in mappings:
match = regexp.match(mapping)
if match is None:
@ -53,26 +89,40 @@ class AbstractPartitionMap(FSMProxy):
p_idx = int(match.group('p_idx')) - 1
self.partitions[p_idx].map(partition_path)
# Check if any partition was not mapped
for idx, partition in enumerate(self.partitions):
if partition.fsm.current not in ['mapped', 'formatted']:
raise PartitionError('kpartx did not map partition #{idx}'.format(idx=idx + 1))
except PartitionError as e:
# Revert any mapping and reraise the error
for partition in self.partitions:
if not partition.fsm.can('unmap'):
partition.unmap()
log_check_call(['/sbin/kpartx', '-d', volume.device_path])
log_check_call(['kpartx', '-d', volume.device_path])
raise e
def unmap(self, volume):
"""Unmaps the partition
Args:
volume (Volume): The volume to unmap the partition map from
"""
self.fsm.unmap(volume=volume)
def _before_unmap(self, event):
"""
Raises:
PartitionError
"""
volume = event.volume
# Run through all partitions before unmapping and make sure they can all be unmapped
for partition in self.partitions:
if partition.fsm.cannot('unmap'):
msg = 'The partition {partition} prevents the unmap procedure'.format(partition=partition)
raise PartitionError(msg)
log_check_call(['/sbin/kpartx', '-d', volume.device_path])
# Actually unmap the partitions
log_check_call(['kpartx', '-d', volume.device_path])
# Call unmap on all partitions
for partition in self.partitions:
partition.unmap()
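For reference, the `kpartx -l` output that the regular expression above is written against looks roughly like this (device names and block counts are illustrative):

```
loop0p1 : 0 204800 /dev/loop0 2048
loop0p2 : 0 1843200 /dev/loop0 206848
```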

View file

@ -5,34 +5,53 @@ from common.tools import log_check_call
class GPTPartitionMap(AbstractPartitionMap):
"""Represents a GPT partition map
"""
def __init__(self, data, bootloader):
"""
Args:
data (dict): volume.partitions part of the manifest
bootloader (str): Name of the bootloader we will use for bootstrapping
"""
from common.bytes import Bytes
# List of partitions
self.partitions = []
# Returns the last partition unless there is none
def last_partition():
return self.partitions[-1] if len(self.partitions) > 0 else None
# GPT offset
gpt_offset = Bytes('17KiB')
# If we are using the grub bootloader we need to create an unformatted partition
# at the beginning of the map. Its size is 1007KiB, which we will steal from the
# next partition.
if bootloader == 'grub':
from ..partitions.unformatted import UnformattedPartition
self.grub_boot = UnformattedPartition(Bytes('1007KiB'), last_partition())
self.grub_boot.offset = gpt_offset
# Mark the partition as a bios_grub partition
self.grub_boot.flags.append('bios_grub')
self.partitions.append(self.grub_boot)
# The boot and swap partitions are optional
if 'boot' in data:
self.boot = GPTPartition(Bytes(data['boot']['size']), data['boot']['filesystem'],
self.boot = GPTPartition(Bytes(data['boot']['size']),
data['boot']['filesystem'], data['boot'].get('format_command', None),
'boot', last_partition())
self.partitions.append(self.boot)
if 'swap' in data:
self.swap = GPTSwapPartition(Bytes(data['swap']['size']), last_partition())
self.partitions.append(self.swap)
self.root = GPTPartition(Bytes(data['root']['size']), data['root']['filesystem'],
self.root = GPTPartition(Bytes(data['root']['size']),
data['root']['filesystem'], data['root'].get('format_command', None),
'root', last_partition())
self.partitions.append(self.root)
# Depending on whether we have a grub boot partition
# we will need to set the offset accordingly.
if hasattr(self, 'grub_boot'):
self.partitions[1].size -= gpt_offset
self.partitions[1].size -= self.grub_boot.size
@ -43,8 +62,13 @@ class GPTPartitionMap(AbstractPartitionMap):
super(GPTPartitionMap, self).__init__(bootloader)
def _before_create(self, event):
"""Creates the partition map
"""
volume = event.volume
log_check_call(['/sbin/parted', '--script', '--align', 'none', volume.device_path,
# Disk alignment still plays a role in virtualized environments,
# but I honestly have no clue as to what best practice is here, so we choose 'none'
log_check_call(['parted', '--script', '--align', 'none', volume.device_path,
'--', 'mklabel', 'gpt'])
# Create the partitions
for partition in self.partitions:
partition.create(volume)
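The volume.partitions manifest section consumed by this constructor might look like the following sketch (sizes are illustrative; the boot and swap entries are optional):

```json
"partitions": {
	"type": "gpt",
	"boot": { "size": "64MiB", "filesystem": "ext2" },
	"swap": { "size": "512MiB" },
	"root": { "size": "4GiB", "filesystem": "ext4" }
}
```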

View file

@ -5,25 +5,43 @@ from common.tools import log_check_call
class MSDOSPartitionMap(AbstractPartitionMap):
"""Represents a MS-DOS partition map
Sometimes also called MBR (but that confuses the hell out of me, so ms-dos it is)
"""
def __init__(self, data, bootloader):
"""
Args:
data (dict): volume.partitions part of the manifest
bootloader (str): Name of the bootloader we will use for bootstrapping
"""
from common.bytes import Bytes
# List of partitions
self.partitions = []
# Returns the last partition unless there is none
def last_partition():
return self.partitions[-1] if len(self.partitions) > 0 else None
# The boot and swap partitions are optional
if 'boot' in data:
self.boot = MSDOSPartition(Bytes(data['boot']['size']), data['boot']['filesystem'], None)
self.boot = MSDOSPartition(Bytes(data['boot']['size']),
data['boot']['filesystem'], data['boot'].get('format_command', None),
last_partition())
self.partitions.append(self.boot)
if 'swap' in data:
self.swap = MSDOSSwapPartition(Bytes(data['swap']['size']), last_partition())
self.partitions.append(self.swap)
self.root = MSDOSPartition(Bytes(data['root']['size']), data['root']['filesystem'], last_partition())
self.root = MSDOSPartition(Bytes(data['root']['size']),
data['root']['filesystem'], data['root'].get('format_command', None),
last_partition())
self.partitions.append(self.root)
# Mark boot as the boot partition, or root, if boot does not exist
getattr(self, 'boot', self.root).flags.append('boot')
# If we are using the grub bootloader, we will need to create a 2MiB offset at the beginning
# of the partitionmap and steal it from the first partition
if bootloader == 'grub':
self.partitions[0].offset = Bytes('2MiB')
self.partitions[0].size -= self.partitions[0].offset
@ -32,7 +50,10 @@ class MSDOSPartitionMap(AbstractPartitionMap):
def _before_create(self, event):
volume = event.volume
log_check_call(['/sbin/parted', '--script', '--align', 'none', volume.device_path,
# Disk alignment still plays a role in virtualized environments,
# but I honestly have no clue as to what best practice is here, so we choose 'none'
log_check_call(['parted', '--script', '--align', 'none', volume.device_path,
'--', 'mklabel', 'msdos'])
# Create the partitions
for partition in self.partitions:
partition.create(volume)

View file

@ -2,14 +2,35 @@ from ..partitions.single import SinglePartition
class NoPartitions(object):
"""Represents a virtual 'NoPartitions' partitionmap.
This virtual partition map exists because it is easier for tasks to
simply always deal with partition maps and then let the base abstract that away.
"""
def __init__(self, data, bootloader):
"""
Args:
data (dict): volume.partitions part of the manifest
bootloader (str): Name of the bootloader we will use for bootstrapping
"""
from common.bytes import Bytes
self.root = SinglePartition(Bytes(data['root']['size']), data['root']['filesystem'])
# In the NoPartitions partitions map we only have a single 'partition'
self.root = SinglePartition(Bytes(data['root']['size']),
data['root']['filesystem'], data['root'].get('format_command', None))
self.partitions = [self.root]
def is_blocking(self):
"""Returns whether the partition map is blocking volume detach operations
Returns:
bool.
"""
return self.root.fsm.current == 'mounted'
def get_total_size(self):
"""Returns the total size the partitions occupy
Returns:
Bytes. The size of all the partitions
"""
return self.root.get_end()

View file

@ -6,9 +6,13 @@ from common.fsm_proxy import FSMProxy
class AbstractPartition(FSMProxy):
"""Abstract representation of a partiton
This class is a finite state machine and represents the state of the real partition
"""
__metaclass__ = ABCMeta
# Our states
events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'created'},
{'name': 'format', 'src': 'created', 'dst': 'formatted'},
{'name': 'mount', 'src': 'formatted', 'dst': 'mounted'},
@ -16,37 +20,68 @@ class AbstractPartition(FSMProxy):
]
class Mount(object):
"""Represents a mount into the partition
"""
def __init__(self, source, destination, opts):
"""
Args:
source (str,AbstractPartition): The path from which we mount, or a partition
destination (str): The path of the mountpoint
opts (list): List of options to pass to the mount command
"""
self.source = source
self.destination = destination
self.opts = opts
def mount(self, prefix):
"""Performs the mount operation or forwards it to another partition
Args:
prefix (str): Path prefix of the mountpoint
"""
mount_dir = os.path.join(prefix, self.destination)
# If the source is another partition, we tell that partition to mount itself
if isinstance(self.source, AbstractPartition):
self.source.mount(destination=mount_dir)
else:
log_check_call(['/bin/mount'] + self.opts + [self.source, mount_dir])
log_check_call(['mount'] + self.opts + [self.source, mount_dir])
self.mount_dir = mount_dir
def unmount(self):
"""Performs the unmount operation or asks the partition to unmount itself
"""
# If it's a partition, it can unmount itself
if isinstance(self.source, AbstractPartition):
self.source.unmount()
else:
log_check_call(['/bin/umount', self.mount_dir])
log_check_call(['umount', self.mount_dir])
del self.mount_dir
def __init__(self, size, filesystem):
self.size = size
self.filesystem = filesystem
self.device_path = None
self.mounts = {}
def __init__(self, size, filesystem, format_command):
"""
Args:
size (Bytes): Size of the partition
filesystem (str): Filesystem the partition should be formatted with
format_command (list): Optional format command, valid variables are fs, device_path and size
"""
self.size = size
self.filesystem = filesystem
self.format_command = format_command
# Path to the partition
self.device_path = None
# Dictionary with mount points as keys and Mount objects as values
self.mounts = {}
# Create the configuration for our state machine
cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': {}}
super(AbstractPartition, self).__init__(cfg)
def get_uuid(self):
[uuid] = log_check_call(['/sbin/blkid', '-s', 'UUID', '-o', 'value', self.device_path])
"""Gets the UUID of the partition
Returns:
str. The UUID of the partition
"""
[uuid] = log_check_call(['blkid', '-s', 'UUID', '-o', 'value', self.device_path])
return uuid
@abstractmethod
@ -54,33 +89,77 @@ class AbstractPartition(FSMProxy):
pass
def get_end(self):
"""Gets the end of the partition
Returns:
Bytes. The end of the partition
"""
return self.get_start() + self.size
def _before_format(self, e):
mkfs = '/sbin/mkfs.{fs}'.format(fs=self.filesystem)
log_check_call([mkfs, self.device_path])
"""Formats the partition
"""
# If there is no explicit format_command defined, we simply call mkfs.{fs}
if self.format_command is None:
format_command = ['mkfs.{fs}', '{device_path}']
else:
format_command = self.format_command
variables = {'fs': self.filesystem,
'device_path': self.device_path,
'size': self.size,
}
command = map(lambda part: part.format(**variables), format_command)
# Format the partition
log_check_call(command)
def _before_mount(self, e):
log_check_call(['/bin/mount', '--types', self.filesystem, self.device_path, e.destination])
"""Mount the partition
"""
log_check_call(['mount', '--types', self.filesystem, self.device_path, e.destination])
self.mount_dir = e.destination
def _after_mount(self, e):
"""Mount any mounts associated with this partition
"""
# Make sure we mount in ascending order of mountpoint path length
# This ensures that we don't mount /dev/pts before we mount /dev
for destination in sorted(self.mounts.iterkeys(), key=len):
self.mounts[destination].mount(self.mount_dir)
def _before_unmount(self, e):
"""Unmount any mounts associated with this partition
"""
# Unmount the mounts in descending order of mountpoint path length
# You cannot unmount /dev before you have unmounted /dev/pts
for destination in sorted(self.mounts.iterkeys(), key=len, reverse=True):
self.mounts[destination].unmount()
log_check_call(['/bin/umount', self.mount_dir])
log_check_call(['umount', self.mount_dir])
del self.mount_dir
def add_mount(self, source, destination, opts=[]):
"""Associate a mount with this partition
Automatically mounts it
Args:
source (str,AbstractPartition): The source of the mount
destination (str): The path to the mountpoint
opts (list): Any options that should be passed to the mount command
"""
# Create a new mount object, mount it if the partition is mounted and put it in the mounts dict
mount = self.Mount(source, destination, opts)
if self.fsm.current == 'mounted':
mount.mount(self.mount_dir)
self.mounts[destination] = mount
def remove_mount(self, destination):
"""Remove a mount from this partition
Automatically unmounts it
Args:
destination (str): The mountpoint path of the mount that should be removed
"""
# Unmount the mount if the partition is mounted and delete it from the mounts dict
# If the mount is already unmounted and the source is a partition, this will raise an exception
if self.fsm.current == 'mounted':
self.mounts[destination].unmount()
del self.mounts[destination]
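To illustrate the substitution in `_before_format`: a manifest could carry a custom format command such as the one below (the mkfs flag is purely illustrative). With the filesystem set to ext4 and the partition mapped at /dev/mapper/loop0p1, it would expand to `mkfs.ext4 -I 256 /dev/mapper/loop0p1`.

```json
"format_command": ["mkfs.{fs}", "-I", "256", "{device_path}"]
```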

View file

@ -2,7 +2,11 @@ from abstract import AbstractPartition
class BasePartition(AbstractPartition):
"""Represents a partition that is actually a partition (and not a virtual one like 'Single')
"""
# Override the states of the abstract partition
# A real partition can be mapped and unmapped
events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'},
{'name': 'map', 'src': 'unmapped', 'dst': 'mapped'},
{'name': 'format', 'src': 'mapped', 'dst': 'formatted'},
@ -14,46 +18,88 @@ class BasePartition(AbstractPartition):
{'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'},
]
def __init__(self, size, filesystem, previous):
def __init__(self, size, filesystem, format_command, previous):
"""
Args:
size (Bytes): Size of the partition
filesystem (str): Filesystem the partition should be formatted with
format_command (list): Optional format command, valid variables are fs, device_path and size
previous (BasePartition): The partition that precedes this one
"""
# By saving the previous partition we get a linked list
# through which partitions can walk backwards to find the first partition.
self.previous = previous
from common.bytes import Bytes
# Initialize the offset to 0 bytes, may be changed later
self.offset = Bytes(0)
# List of flags that parted should put on the partition
self.flags = []
super(BasePartition, self).__init__(size, filesystem)
super(BasePartition, self).__init__(size, filesystem, format_command)
def create(self, volume):
"""Creates the partition
Args:
volume (Volume): The volume to create the partition on
"""
self.fsm.create(volume=volume)
def get_index(self):
"""Gets the index of this partition in the partition map
Returns:
int. The index of the partition in the partition map
"""
if self.previous is None:
# Partitions are 1 indexed
return 1
else:
# Recursive call to the previous partition, walking up the chain...
return self.previous.get_index() + 1
def get_start(self):
"""Gets the starting byte of this partition
Returns:
Bytes. The starting byte of this partition
"""
if self.previous is None:
# If there is no previous partition, this partition begins at the offset
return self.offset
else:
# Get the end of the previous partition and add the offset of this partition
return self.previous.get_end() + self.offset
def map(self, device_path):
"""Maps the partition to a device_path
Args:
device_path (str): The device path this partition should be mapped to
"""
self.fsm.map(device_path=device_path)
def _before_create(self, e):
"""Creates the partition
"""
from common.tools import log_check_call
# The create command is fairly simple, start and end are just Bytes objects coerced into strings
create_command = ('mkpart primary {start} {end}'
.format(start=str(self.get_start()),
end=str(self.get_end())))
log_check_call(['/sbin/parted', '--script', '--align', 'none', e.volume.device_path,
# Create the partition
log_check_call(['parted', '--script', '--align', 'none', e.volume.device_path,
'--', create_command])
# Set any flags on the partition
for flag in self.flags:
log_check_call(['/sbin/parted', '--script', e.volume.device_path,
log_check_call(['parted', '--script', e.volume.device_path,
'--', ('set {idx} {flag} on'
.format(idx=str(self.get_index()), flag=flag))])
def _before_map(self, e):
# Set the device path
self.device_path = e.device_path
def _before_unmap(self, e):
# When unmapped, the device_path information becomes invalid, so we delete it
self.device_path = None

View file

@ -3,16 +3,27 @@ from base import BasePartition
class GPTPartition(BasePartition):
"""Represents a GPT partition
"""
def __init__(self, size, filesystem, name, previous):
def __init__(self, size, filesystem, format_command, name, previous):
"""
Args:
size (Bytes): Size of the partition
filesystem (str): Filesystem the partition should be formatted with
format_command (list): Optional format command, valid variables are fs, device_path and size
name (str): The name of the partition
previous (BasePartition): The partition that precedes this one
"""
self.name = name
super(GPTPartition, self).__init__(size, filesystem, previous)
super(GPTPartition, self).__init__(size, filesystem, format_command, previous)
def _before_create(self, e):
# Create the partition and then set the name of the partition afterwards
super(GPTPartition, self)._before_create(e)
# partition name only works for gpt, for msdos that becomes the part-type (primary, extended, logical)
name_command = ('name {idx} {name}'
.format(idx=self.get_index(),
name=self.name))
log_check_call(['/sbin/parted', '--script', e.volume.device_path,
log_check_call(['parted', '--script', e.volume.device_path,
'--', name_command])

View file

@ -3,9 +3,16 @@ from gpt import GPTPartition
class GPTSwapPartition(GPTPartition):
"""Represents a GPT swap partition
"""
def __init__(self, size, previous):
super(GPTSwapPartition, self).__init__(size, 'swap', 'swap', previous)
"""
Args:
size (Bytes): Size of the partition
previous (BasePartition): The partition that precedes this one
"""
super(GPTSwapPartition, self).__init__(size, 'swap', None, 'swap', previous)
def _before_format(self, e):
log_check_call(['/sbin/mkswap', self.device_path])
log_check_call(['mkswap', self.device_path])

View file

@ -2,4 +2,6 @@ from base import BasePartition
class MSDOSPartition(BasePartition):
"""Represents an MS-DOS partition
"""
pass

View file

@ -3,9 +3,16 @@ from msdos import MSDOSPartition
class MSDOSSwapPartition(MSDOSPartition):
"""Represents a MS-DOS swap partition
"""
def __init__(self, size, previous):
super(MSDOSSwapPartition, self).__init__(size, 'swap', previous)
"""
Args:
size (Bytes): Size of the partition
previous (BasePartition): The partition that precedes this one
"""
super(MSDOSSwapPartition, self).__init__(size, 'swap', None, previous)
def _before_format(self, e):
log_check_call(['/sbin/mkswap', self.device_path])
log_check_call(['mkswap', self.device_path])

View file

@ -2,7 +2,15 @@ from abstract import AbstractPartition
class SinglePartition(AbstractPartition):
"""Represents a single virtual partition on an unpartitioned volume
"""
def get_start(self):
"""Gets the starting byte of this partition
Returns:
Bytes. The starting byte of this partition
"""
from common.bytes import Bytes
# On an unpartitioned volume there is no offset and no previous partition
return Bytes(0)

View file

@ -2,11 +2,20 @@ from base import BasePartition
class UnformattedPartition(BasePartition):
"""Represents an unformatted partition
It cannot be mounted
"""
# The states for our state machine. It can only be mapped, not mounted.
events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'},
{'name': 'map', 'src': 'unmapped', 'dst': 'mapped'},
{'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'},
]
def __init__(self, size, previous):
super(UnformattedPartition, self).__init__(size, None, previous)
"""
Args:
size (Bytes): Size of the partition
previous (BasePartition): The partition that precedes this one
"""
super(UnformattedPartition, self).__init__(size, None, None, previous)

View file

@ -6,9 +6,13 @@ from partitionmaps.none import NoPartitions
class Volume(FSMProxy):
"""Represents an abstract volume.
This class is a finite state machine and represents the state of the real volume.
"""
__metaclass__ = ABCMeta
# States this volume can be in
events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'detached'},
{'name': 'attach', 'src': 'detached', 'dst': 'attached'},
{'name': 'link_dm_node', 'src': 'attached', 'dst': 'linked'},
@ -18,33 +22,76 @@ class Volume(FSMProxy):
]
def __init__(self, partition_map):
"""
Args:
partition_map (PartitionMap): The partition map for the volume
"""
# Path to the volume
self.device_path = None
self.real_device_path = None
# The partition map
self.partition_map = partition_map
# The size of the volume as reported by the partition map
self.size = self.partition_map.get_total_size()
# Before detaching, check that nothing would block the detachment
callbacks = {'onbeforedetach': self._check_blocking}
if isinstance(self.partition_map, NoPartitions):
# When the volume has no partitions, the virtual root partition path is equal to that of the volume
# Update that path whenever the path to the volume changes
def set_dev_path(e):
self.partition_map.root.device_path = self.device_path
callbacks['onafterattach'] = set_dev_path
callbacks['onlink_dm_node'] = set_dev_path
callbacks['onunlink_dm_node'] = set_dev_path
# Create the configuration for our finite state machine
cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': callbacks}
super(Volume, self).__init__(cfg)
def _after_create(self, e):
"""
Args:
e (_e_obj): Event object containing arguments to create()
"""
if isinstance(self.partition_map, NoPartitions):
# When the volume has no partitions, the virtual root partition
# is essentially created when the volume is created, forward that creation event.
self.partition_map.root.create()
def _check_blocking(self, e):
"""Checks whether the volume is blocked
Args:
e (_e_obj): Event object containing arguments to create()
Raises:
VolumeError
"""
# Only the partition map can block the volume
if self.partition_map.is_blocking():
raise VolumeError('The partitionmap prevents the detach procedure')
def _before_link_dm_node(self, e):
"""Links the volume using the device mapper
This allows us to create a 'window' into the volume that acts like a volume in itself.
Mainly it is used to fool grub into thinking that it is working with a real volume,
rather than a loopback device or a network block device.
Args:
e (_e_obj): Event object containing arguments to create()
Arguments are:
logical_start_sector (int): The sector the volume should start at in the new volume
start_sector (int): The offset at which the volume should begin to be mapped in the new volume
sectors (int): The number of sectors that should be mapped
Read more at: http://manpages.debian.org/cgi-bin/man.cgi?query=dmsetup&apropos=0&sektion=0&manpath=Debian+7.0+wheezy&format=html&locale=en
Raises:
VolumeError
"""
import os.path
from common.fs import get_partitions
# Fetch information from /proc/partitions
proc_partitions = get_partitions()
device_name = os.path.basename(self.device_path)
device_partition = proc_partitions[device_name]
@ -55,8 +102,10 @@ class Volume(FSMProxy):
# The offset at which the volume should begin to be mapped in the new volume
start_sector = getattr(e, 'start_sector', 0)
# The number of sectors that should be mapped
sectors = getattr(e, 'sectors', int(self.size / 512) - start_sector)
# This is the table we send to dmsetup, so that it may create a device mapping for us.
table = ('{log_start_sec} {sectors} linear {major}:{minor} {start_sec}'
.format(log_start_sec=logical_start_sector,
sectors=sectors,
@ -65,6 +114,7 @@ class Volume(FSMProxy):
start_sec=start_sector))
import string
import os.path
# Figure out the device letter and path
for letter in string.ascii_lowercase:
dev_name = 'vd' + letter
dev_path = os.path.join('/dev/mapper', dev_name)
@ -76,12 +126,21 @@ class Volume(FSMProxy):
if not hasattr(self, 'dm_node_name'):
raise VolumeError('Unable to find a free block device path for mounting the bootstrap volume')
log_check_call(['/sbin/dmsetup', 'create', self.dm_node_name], table)
# Create the device mapping
log_check_call(['dmsetup', 'create', self.dm_node_name], table)
# Update the device_path but remember the old one for when we unlink the volume again
self.unlinked_device_path = self.device_path
self.device_path = self.dm_node_path
def _before_unlink_dm_node(self, e):
log_check_call(['/sbin/dmsetup', 'remove', self.dm_node_name])
"""Unlinks the device mapping
Args:
e (_e_obj): Event object containing arguments to create()
"""
log_check_call(['dmsetup', 'remove', self.dm_node_name])
# Delete the no longer valid information
del self.dm_node_name
del self.dm_node_path
# Reset the device_path
self.device_path = self.unlinked_device_path
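As an example of the table handed to dmsetup: for an 8GiB volume at major:minor 7:0 with the default offsets, the stdin for `dmsetup create vda` would be a single linear mapping (numbers illustrative):

```
0 16777216 linear 7:0 0
```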

View file

@ -1,7 +1,20 @@
"""This module holds functions and classes responsible for formatting the log output
both to a file and to the console.
.. module:: log
"""
import logging
def get_logfile_path(manifest_path):
"""Returns the path to a logfile given a manifest
The logfile name is constructed from the current timestamp and the basename of the manifest
Args:
manifest_path (str): The path to the manifest
Returns:
str. The path to the logfile
"""
import os.path
from datetime import datetime
@ -13,17 +26,31 @@ def get_logfile_path(manifest_path):
def setup_logger(logfile=None, debug=False):
"""Sets up the python logger to log to both a file and the console
Args:
logfile (str): Path to a logfile
debug (bool): Whether to log debug output to the console
"""
root = logging.getLogger()
# Make sure all logging statements are processed by our handlers, they decide the log level
root.setLevel(logging.NOTSET)
# Create a file log handler
file_handler = logging.FileHandler(logfile)
# Absolute timestamps are rather useless when bootstrapping, it's much more interesting
# to see how long things take, so we log in a relative format instead
file_handler.setFormatter(FileFormatter('[%(relativeCreated)s] %(levelname)s: %(message)s'))
# The file log handler always logs everything
file_handler.setLevel(logging.DEBUG)
root.addHandler(file_handler)
# Create a console log handler
import sys
console_handler = logging.StreamHandler(sys.stderr)
# We want to colorize the output to the console, so we add a formatter
console_handler.setFormatter(ConsoleFormatter())
# Set the log level depending on the debug argument
if debug:
console_handler.setLevel(logging.DEBUG)
else:
@ -32,6 +59,8 @@ def setup_logger(logfile=None, debug=False):
class ConsoleFormatter(logging.Formatter):
"""Formats log statements for the console
"""
level_colors = {logging.ERROR: 'red',
logging.WARNING: 'magenta',
logging.INFO: 'blue',
@ -39,11 +68,15 @@ class ConsoleFormatter(logging.Formatter):
def format(self, record):
if(record.levelno in self.level_colors):
# Colorize the message if we have a color for it (DEBUG has no color)
from termcolor import colored
record.msg = colored(record.msg, self.level_colors[record.levelno])
return super(ConsoleFormatter, self).format(record)
class FileFormatter(logging.Formatter):
"""Formats log statements for output to file
Currently this is just a stub
"""
def format(self, record):
return super(FileFormatter, self).format(record)

View file

@ -1,16 +1,34 @@
"""Main module containing all the setup necessary for running the bootstrapping process
.. module:: main
"""
import logging
log = logging.getLogger(__name__)
def main():
import log
"""Main function for invoking the bootstrap process
Raises:
Exception
"""
# Get the commandline arguments
import os
args = get_args()
# Require root privileges, except when doing a dry-run where they aren't needed
if os.geteuid() != 0 and not args.dry_run:
raise Exception('This program requires root privileges.')
# Setup logging
import log
logfile = log.get_logfile_path(args.manifest)
log.setup_logger(logfile=logfile, debug=args.debug)
# Everything has been set up, begin the bootstrapping process
run(args)
def get_args():
"""Creates an argument parser and returns the arguments it has parsed
"""
from argparse import ArgumentParser
parser = ArgumentParser(description='Bootstrap Debian for the cloud.')
parser.add_argument('--debug', action='store_true',
@ -24,31 +42,57 @@ def get_args():
def run(args):
"""Runs the bootstrapping process
Args:
args (dict): Dictionary of arguments from the commandline
"""
# Load the manifest
from manifest import Manifest
manifest = Manifest(args.manifest)
# Get the tasklist
from tasklist import TaskList
tasklist = TaskList()
# 'resolve_tasks' is the name of the function to call on the provider and plugins
tasklist.load('resolve_tasks', manifest)
# Create the bootstrap information object that'll be used throughout the bootstrapping process
from bootstrapinfo import BootstrapInformation
bootstrap_info = BootstrapInformation(manifest=manifest, debug=args.debug)
try:
# Run all the tasks the tasklist has gathered
tasklist.run(info=bootstrap_info, dry_run=args.dry_run)
# We're done! :-)
log.info('Successfully completed bootstrapping')
except (Exception, KeyboardInterrupt) as e:
# When an error occurs, log it and begin rollback
log.exception(e)
if args.pause_on_error:
raw_input("Press Enter to commence rollback")
# The --pause-on-error is useful when the user wants to inspect the volume before rollback
raw_input('Press Enter to commence rollback')
log.error('Rolling back')
# Create a new tasklist to gather the necessary tasks for rollback
rollback_tasklist = TaskList()
# Create a useful little function for the provider and plugins to use,
# when figuring out what tasks should be added to the rollback list.
def counter_task(task, counter):
"""counter_task() adds the second argument to the rollback tasklist
if the first argument is present in the list of completed tasks
Args:
task (Task): The task to look for in the completed tasks list
counter (Task): The task to add to the rollback tasklist
"""
if task in tasklist.tasks_completed and counter not in tasklist.tasks_completed:
rollback_tasklist.tasks.add(counter)
# Ask the provider and plugins for tasks they'd like to add to the rollback tasklist
# Any additional arguments beyond the first two are passed directly to the provider and plugins
rollback_tasklist.load('resolve_rollback_tasks', manifest, counter_task)
# Run the rollback tasklist
rollback_tasklist.run(info=bootstrap_info, dry_run=args.dry_run)
log.info('Successfully completed rollback')
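Assuming the entry-point script is called bootstrap-vz (its name is not shown in this diff), an invocation would look something like this; the positional manifest argument and the flag spellings are inferred from the parser and the args attributes above:

```
$ sudo ./bootstrap-vz --debug --pause-on-error manifest.json
```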

View file

@ -99,7 +99,7 @@
"additionalProperties": false
}
},
"required": ["provider", "bootstrapper", "image", "volume", "system"],
"required": ["provider", "bootstrapper", "system", "volume"],
"definitions": {
"path": {
"type": "string",
@ -141,10 +141,14 @@
"type": "object",
"properties": {
"size": { "$ref": "#/definitions/bytes" },
"filesystem": { "enum": ["ext2", "ext3", "ext4", "xfs"] }
"filesystem": { "enum": ["ext2", "ext3", "ext4", "xfs"] },
"format_command": {
"type": "array",
"items": {"type": "string"},
"minItems": 1
}
},
"required": ["size", "filesystem"]
}
},
"required": ["provider", "bootstrapper", "system", "packages", "volume"]
}
}

View file

@ -1,21 +1,48 @@
"""The Manifest module contains the manifest that providers and plugins use
to determine which tasks should be added to the tasklist, what arguments various
invocations should have etc..
.. module:: manifest
"""
from common.tools import load_json
import logging
log = logging.getLogger(__name__)
class Manifest(object):
"""This class holds all the information that providers and plugins need
to perform the bootstrapping process. All actions that are taken originate from
here. The manifest shall not be modified after it has been loaded.
Currently, immutability is not enforced and it would require a fair amount of code
to enforce it; instead we just rely on tasks behaving properly.
"""
def __init__(self, path):
"""Initializer: Given a path we load, validate and parse the manifest.
Args:
path (str): The path to the manifest
"""
self.path = path
self.load()
self.validate()
self.parse()
def load(self):
self.data = self.load_json(self.path)
"""Loads the manifest.
This function not only reads the manifest but also loads the specified provider and plugins.
Once they are loaded, the initialize() function is called on each of them (if it exists).
The provider must have an initialize function.
"""
# Load the manifest JSON using the loader in common.tools
# It strips comments (which are invalid in strict json) before loading the data.
self.data = load_json(self.path)
# Get the provider name from the manifest and load the corresponding module
provider_modname = 'providers.{provider}'.format(provider=self.data['provider'])
log.debug('Loading provider `{modname}\''.format(modname=provider_modname))
# Create a modules dict that contains the loaded provider and plugins
self.modules = {'provider': __import__(provider_modname, fromlist=['providers']),
'plugins': [],
}
# Run through all the plugins mentioned in the manifest and load them
if 'plugins' in self.data:
for plugin_name, plugin_data in self.data['plugins'].iteritems():
modname = 'plugins.{plugin}'.format(plugin=plugin_name)
@ -23,44 +50,76 @@ class Manifest(object):
plugin = __import__(modname, fromlist=['plugins'])
self.modules['plugins'].append(plugin)
# Run the initialize function on the provider and plugins
self.modules['provider'].initialize()
for module in self.modules['plugins']:
# Plugins are not required to have an initialize function
init = getattr(module, 'initialize', None)
if callable(init):
init()
def validate(self):
"""Validates the manifest using the base, provider and plugin validation functions.
Plugins are not required to have a validate_manifest function
"""
from . import validate_manifest
# Validate the manifest with the base validation function in __init__
validate_manifest(self.data, self.schema_validator, self.validation_error)
# Run the provider validation
self.modules['provider'].validate_manifest(self.data, self.schema_validator, self.validation_error)
# Run the validation function for any plugin that has it
for plugin in self.modules['plugins']:
validate = getattr(plugin, 'validate_manifest', None)
if callable(validate):
validate(self.data, self.schema_validator, self.validation_error)
def parse(self):
"""Parses the manifest.
Well... "parsing" is a big word.
The function really just sets up some convenient attributes so that tasks
don't have to access information with info.manifest.data['section']
but can do it with info.manifest.section.
"""
self.provider = self.data['provider']
self.bootstrapper = self.data['bootstrapper']
self.image = self.data['image']
self.volume = self.data['volume']
self.system = self.data['system']
self.packages = self.data['packages']
# The packages and plugins section is not required
self.packages = self.data['packages'] if 'packages' in self.data else {}
self.plugins = self.data['plugins'] if 'plugins' in self.data else {}
def load_json(self, path):
"""Loads JSON. Unused and will be removed.
Use common.tools.load_json instead
"""
import json
from minify_json import json_minify
with open(path) as stream:
return json.loads(json_minify(stream.read(), False))
def schema_validator(self, data, schema_path):
"""This convenience function is passed around to all the validation functions
so that they may run a json-schema validation by giving it the data and a path to the schema.
Args:
data (dict): Data to validate (normally the manifest data)
schema_path (str): Path to the json-schema to use for validation
"""
import jsonschema
schema = self.load_json(schema_path)
schema = load_json(schema_path)
try:
jsonschema.validate(data, schema)
except jsonschema.ValidationError as e:
self.validation_error(e.message, e.path)
def validation_error(self, message, json_path=None):
"""This function is passed to all validation functions so that they may
raise a validation error because a custom validation of the manifest failed.
Args:
message (str): Message to user about the error
json_path (list): A path to the location in the manifest where the error occurred
"""
from common.exceptions import ManifestError
raise ManifestError(message, self.path, json_path)

View file

@ -1,16 +1,33 @@
class Phase(object):
"""The Phase class represents a phase a task may be in.
It has no function other than to act as an anchor in the task graph.
All phases are instantiated in common.phases
"""
def __init__(self, name, description):
# The name of the phase
self.name = name
# The description of the phase (currently not used anywhere)
self.description = description
def pos(self):
"""Gets the position of the phase
Returns:
int. The positional index of the phase in relation to the other phases
"""
from common.phases import order
return next(i for i, phase in enumerate(order) if phase is self)
def __cmp__(self, other):
"""Compares the phase order in relation to the other phases
"""
return self.pos() - other.pos()
def __str__(self):
"""String representation of the phase, the name suffices
Returns:
string.
"""
return self.name
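Because `__cmp__` delegates to `pos()`, phases can be compared and sorted directly. A sketch (`tasks` is an illustrative collection):

```python
first_phase = min(task.phase for task in tasks)   # the phase that runs earliest
```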

View file

@ -1,8 +1,12 @@
class PackageError(Exception):
"""Raised when an error occurrs while handling the packageslist
"""
pass
class SourceError(Exception):
"""Raised when an error occurs while handling the sourceslist
"""
pass

View file

@ -2,47 +2,96 @@ from exceptions import PackageError
class PackageList(object):
"""Represents a list of packages
"""
class Remote(object):
"""A remote package with an optional target
"""
def __init__(self, name, target):
"""
Args:
name (str): The name of the package
target (str): The name of the target release
"""
self.name = name
self.target = target
def __str__(self):
"""Converts the package into somehting that apt-get install can parse
Returns:
string.
"""
if self.target is None:
return self.name
else:
return '{name}/{target}'.format(name=self.name, target=self.target)
class Local(object):
"""A local package
"""
def __init__(self, path):
"""
Args:
path (str): The path to the local package
"""
self.path = path
def __str__(self):
"""
Returns:
string. The path to the local package
"""
return self.path
def __init__(self, manifest_vars, source_lists):
"""
Args:
manifest_vars (dict): The manifest variables
source_lists (SourceLists): The sourcelists for apt
"""
self.manifest_vars = manifest_vars
self.source_lists = source_lists
# The default_target is the release we are bootstrapping
self.default_target = '{system.release}'.format(**self.manifest_vars)
# The list of packages that should be installed, this is not a set.
# We want to preserve the order in which the packages were added so that local
# packages may be installed in the correct order.
self.install = []
# A function that filters the install list and only returns remote packages
self.remote = lambda: filter(lambda x: isinstance(x, self.Remote), self.install)
def add(self, name, target=None):
"""Adds a package to the install list
Args:
name (str): The name of the package to install, may contain manifest vars references
target (str): The name of the target release for the package, may contain manifest vars references
Raises:
PackageError
"""
name = name.format(**self.manifest_vars)
if target is not None:
target = target.format(**self.manifest_vars)
# Check if the package has already been added.
# If so, make sure it's the same target and raise a PackageError otherwise
package = next((pkg for pkg in self.remote() if pkg.name == name), None)
if package is not None:
same_target = package.target != target
# It's the same target if the target names match or one of the targets is None
# and the other is the default target.
same_target = package.target == target
same_target = same_target or package.target is None and target == self.default_target
same_target = same_target or package.target == self.default_target and target is None
if not same_target:
msg = ('The package {name} was already added to the package list, '
'but with another target release ({target})').format(name=name, target=package.target)
'but with target release `{target}\' instead of `{add_target}\''
.format(name=name, target=package.target, add_target=target))
raise PackageError(msg)
# The package has already been added, skip the checks below
return
# Check if the target exists in the sources list, raise a PackageError if not
check_target = target
if check_target is None:
check_target = self.default_target
@ -50,8 +99,17 @@ class PackageList(object):
msg = ('The target release {target} was not found in the sources list').format(target=check_target)
raise PackageError(msg)
# Note that we maintain the target value even if it is none.
# This allows us to preserve the semantics of the default target when calling apt-get install
# Why? Try installing nfs-client/wheezy, you can't. It's a virtual package for which you cannot define
# a target release. Only `apt-get install nfs-client` works.
self.install.append(self.Remote(name, target))
def add_local(self, package_path):
"""Adds a local package to the installation list
Args:
package_path (str): Path to the local package, may contain manifest vars references
"""
package_path = package_path.format(**self.manifest_vars)
self.install.append(self.Local(package_path))
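A usage sketch (the package names and the backports target are illustrative, and `add` would raise a PackageError if the target is missing from the sources list):

```python
info.packages.add('puppet')                                    # target defaults to the bootstrapped release
info.packages.add('cloud-init', '{system.release}-backports')  # explicit target, manifest vars allowed
info.packages.add_local('/root/mypackage_1.0_all.deb')         # local .deb, kept in insertion order
```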

View file

@ -1,12 +1,27 @@
class SourceLists(object):
"""Represents a list of sources lists for apt
"""
def __init__(self, manifest_vars):
"""
Args:
manifest_vars (dict): The manifest variables
"""
# A dictionary with the name of the file in sources.list.d as the key
# The values are lists of Source objects
self.sources = {}
# Save the manifest variables, we need them later on
self.manifest_vars = manifest_vars
def add(self, name, line):
"""Adds a source to the apt sources list
Args:
name (str): Name of the file in sources.list.d, may contain manifest vars references
line (str): The line for the source file, may contain manifest vars references
"""
name = name.format(**self.manifest_vars)
line = line.format(**self.manifest_vars)
if name not in self.sources:
@ -14,7 +29,16 @@ class SourceLists(object):
self.sources[name].append(Source(line))
def target_exists(self, target):
"""Checks whether the target exists in the sources list
Args:
target (str): Name of the target to check for, may contain manifest vars references
Returns:
bool. Whether the target exists
"""
target = target.format(**self.manifest_vars)
# Run through all the sources and return True if the target exists
for lines in self.sources.itervalues():
if target in (source.distribution for source in lines):
return True
@ -22,8 +46,20 @@ class SourceLists(object):
class Source(object):
"""Represents a single source line
"""
def __init__(self, line):
"""
Args:
line (str): An apt source line
Raises:
SourceError
"""
# Parse the source line and populate the class attributes with it
# The format is taken from `man sources.list`
# or: http://manpages.debian.org/cgi-bin/man.cgi?sektion=5&query=sources.list&apropos=0&manpath=sid&locale=en
import re
regexp = re.compile('^(?P<type>deb|deb-src)\s+'
'(\[\s*(?P<options>.+\S)?\s*\]\s+)?'
@ -45,6 +81,12 @@ class Source(object):
self.components = re.sub(' +', ' ', match['components']).split(' ')
def __str__(self):
"""Convert the object into a source line
This is pretty much the reverse of what we're doing in the initialization function.
Returns:
string.
"""
options = ''
if len(self.options) > 0:
options = ' [{options}]'.format(options=' '.join(self.options))
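A round-trip sketch using the default mirror that appears elsewhere in this commit:

```python
source = Source('deb http://http.debian.net/debian wheezy main contrib')
source.distribution   # 'wheezy'
str(source)           # 'deb http://http.debian.net/debian wheezy main contrib'
```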

View file

@ -0,0 +1,22 @@
{ // This is a mapping of Debian release names to their respective codenames
"unstable": "sid",
"testing": "jessie",
"stable": "wheezy",
"oldstable": "squeeze",
"jessie": "jessie",
"wheezy": "wheezy",
"squeeze": "squeeze",
// The following release names are not supported, but are included for completeness' sake
"lenny": "lenny",
"etch": "etch",
"sarge": "sarge",
"woody": "woody",
"potato": "potato",
"slink": "slink",
"hamm": "hamm",
"bo": "bo",
"rex": "rex",
"buzz": "buzz"
}
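bootstrapinfo.py reads this file through common.tools.config_get, whose call shape can be seen at the call site above. A sketch of the resolution:

```python
# e.g. with manifest.system['release'] == 'stable':
codename = config_get(release_codenames_path, ['stable'])   # -> 'wheezy'
```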

View file

@ -1,17 +1,37 @@
class Task(object):
"""The task class represents are task that can be run.
It is merely a wrapper for the run function and should never be instantiated.
"""
# The phase this task is located in.
phase = None
# List of tasks that should run before this task is run
predecessors = []
# List of tasks that should run after this task has run
successors = []
class __metaclass__(type):
"""Metaclass to control how the class is coerced into a string
"""
def __repr__(cls):
"""
Returns:
string.
"""
return '{module}.{task}'.format(module=cls.__module__, task=cls.__name__)
def __str__(cls):
"""
Returns:
string.
"""
return repr(cls)
@classmethod
def run(cls, info):
"""The run function, all work is done inside this function
Args:
info (BootstrapInformation): The bootstrap info object
"""
pass

View file

@ -1,49 +1,95 @@
"""The tasklist module contains the TaskList class.
.. module:: tasklist
"""
from common.exceptions import TaskListError
import logging
log = logging.getLogger(__name__)
class TaskList(object):
"""The tasklist class aggregates all tasks that should be run
and orders them according to their dependencies.
"""
def __init__(self):
self.tasks = set()
self.tasks_completed = []
def load(self, function, manifest, *args):
"""Calls 'function' on the provider and all plugins that have been loaded by the manifest.
Any additional arguments are passed directly to 'function'.
The function that is called shall accept the taskset as its first argument and the manifest
as its second argument.
Args:
function (str): Name of the function to call
manifest (Manifest): The manifest
*args: Additional arguments that should be passed to the function that is called
"""
# Call 'function' on the provider
getattr(manifest.modules['provider'], function)(self.tasks, manifest, *args)
for plugin in manifest.modules['plugins']:
# Plugins are not required to have whatever function we call
fn = getattr(plugin, function, None)
if callable(fn):
fn(self.tasks, manifest, *args)
def run(self, info={}, dry_run=False):
"""Converts the taskgraph into a list and runs all tasks in that list
Args:
info (dict): The bootstrap information object
dry_run (bool): Whether to actually run the tasks or simply step through them
"""
# Create a list for us to run
task_list = self.create_list()
# Output the tasklist
log.debug('Tasklist:\n\t{list}'.format(list='\n\t'.join(map(repr, task_list))))
for task in task_list:
# Tasks are not required to have a description
if hasattr(task, 'description'):
log.info(task.description)
else:
# If there is no description, simply coerce the task into a string and print its name
log.info('Running {task}'.format(task=task))
if not dry_run:
# Run the task
task.run(info)
# Remember which tasks have been run for later use (e.g. when rolling back, because of an error)
self.tasks_completed.append(task)
def create_list(self):
"""Creates a list of all the tasks that should be run.
"""
from common.phases import order
# Get a hold of all tasks
tasks = self.get_all_tasks()
# Make sure the taskset is a subset of all the tasks we have gathered
assert self.tasks.issubset(tasks)
# Create a graph over all tasks by creating a map of each tasks successors
graph = {}
for task in self.tasks:
for task in tasks:
# Do a sanity check first
self.check_ordering(task)
successors = set()
# Add all successors mentioned in the task
successors.update(task.successors)
successors.update(filter(lambda succ: task in succ.predecessors, self.tasks))
# Add all tasks that mention this task as a predecessor
successors.update(filter(lambda succ: task in succ.predecessors, tasks))
# Create a list of phases that succeed the phase of this task
succeeding_phases = order[order.index(task.phase) + 1:]
successors.update(filter(lambda succ: succ.phase in succeeding_phases, self.tasks))
graph[task] = filter(lambda succ: succ in self.tasks, successors)
# Add all tasks that occur in above mentioned succeeding phases
successors.update(filter(lambda succ: succ.phase in succeeding_phases, tasks))
# Map the successors to the task
graph[task] = successors
# Use the strongly connected components algorithm to check for cycles in our task graph
components = self.strongly_connected_components(graph)
cycles_found = 0
for component in components:
# A single node is also a strongly connected component, but hardly a cycle, so we filter those out
if len(component) > 1:
cycles_found += 1
log.debug('Cycle: {list}\n'.format(list=', '.join(map(repr, component))))
@@ -52,18 +98,79 @@ class TaskList(object):
'consult the logfile for more information.'.format(cycles_found))
raise TaskListError(msg)
# Run a topological sort on the graph, returning an ordered list
sorted_tasks = self.topological_sort(graph)
# Filter out any tasks not in the tasklist
# We want to maintain ordering, so we don't use set intersection
sorted_tasks = filter(lambda task: task in self.tasks, sorted_tasks)
return sorted_tasks
def get_all_tasks(self):
"""Gets a list of all task classes in the package
Returns:
list. A list of all tasks in the package
"""
# Get a generator that returns all classes in the package
classes = self.get_all_classes('..')
# lambda function to check whether a class is a task (excluding the superclass Task)
def is_task(obj):
from task import Task
return issubclass(obj, Task) and obj is not Task
return filter(is_task, classes) # Only return classes that are tasks
def get_all_classes(self, path=None):
""" Given a path to a package, this function retrieves all the classes in it
Args:
path (str): Path to the package
Returns:
generator. A generator that yields classes
Raises:
Exception
"""
import pkgutil
import importlib
import inspect
def walk_error(module):
raise Exception('Unable to inspect module `{module}\''.format(module=module))
walker = pkgutil.walk_packages(path, '', walk_error)
for _, module_name, _ in walker:
module = importlib.import_module(module_name)
classes = inspect.getmembers(module, inspect.isclass)
for class_name, obj in classes:
# We only want classes that are defined in the module, and not imported ones
if obj.__module__ == module_name:
yield obj
def check_ordering(self, task):
"""Checks the ordering of a task in relation to other tasks and their phases
This function checks for a subset of what the strongly connected components algorithm does,
but can deliver a more precise error message, namely that there is a conflict between
what a task has specified as its predecessors or successors and in which phase it is placed.
Args:
task (Task): The task to check the ordering for
Raises:
TaskListError
"""
for successor in task.successors:
# Run through all successors and check whether the phase of the task
# comes before the phase of a successor
if task.phase > successor.phase:
msg = ("The task {task} is specified as running before {other}, "
"but its phase '{phase}' lies after the phase '{other_phase}'"
.format(task=task, other=successor, phase=task.phase, other_phase=successor.phase))
raise TaskListError(msg)
for predecessor in task.predecessors:
# Run through all predecessors and check whether the phase of the task
# comes after the phase of a predecessor
if task.phase < predecessor.phase:
msg = ("The task {task} is specified as running after {other}, "
"but its phase '{phase}' lies before the phase '{other_phase}'"
@@ -71,9 +178,15 @@ class TaskList(object):
raise TaskListError(msg)
def strongly_connected_components(self, graph):
# Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
# Find the strongly connected components in a graph using Tarjan's algorithm.
# graph should be a dictionary mapping node names to lists of successor nodes.
"""Find the strongly connected components in a graph using Tarjan's algorithm.
Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
Args:
graph (dict): mapping of tasks to lists of successor tasks
Returns:
list. List of tuples that are strongly connected components
"""
result = []
stack = []
@@ -105,7 +218,15 @@ class TaskList(object):
return result
def topological_sort(self, graph):
# Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
"""Runs a topological sort on a graph
Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
Args:
graph (dict): mapping of tasks to lists of successor tasks
Returns:
list. A list of all tasks in the graph sorted according to their dependencies
"""
count = {}
for node in graph:
count[node] = 0
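To make the ordering machinery concrete, here is a standalone sketch (not the class above) of the same idea: count incoming edges in a successor map, repeatedly emit nodes with no remaining predecessors, and treat leftovers as a cycle:

def topological_sort(graph):
	# Count the incoming edges of every node
	count = dict((node, 0) for node in graph)
	for node in graph:
		for successor in graph[node]:
			count[successor] += 1
	# Nodes without predecessors are ready to be emitted
	ready = [node for node in graph if count[node] == 0]
	result = []
	while ready:
		node = ready.pop()
		result.append(node)
		for successor in graph[node]:
			count[successor] -= 1
			if count[successor] == 0:
				ready.append(successor)
	if len(result) < len(graph):
		raise Exception('The graph contains a cycle')
	return result

graph = {'create': ['attach'], 'attach': ['format'], 'format': []}
print topological_sort(graph)  # ['create', 'attach', 'format']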

View file

@@ -4,9 +4,9 @@ class Bytes(object):
units = {'B': 1,
'KiB': 1024,
'MiB': 1024*1024,
'GiB': 1024*1024*1024,
'TiB': 1024*1024*1024*1024,
'MiB': 1024 * 1024,
'GiB': 1024 * 1024 * 1024,
'TiB': 1024 * 1024 * 1024 * 1024,
}
def __init__(self, qty):
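A hedged usage sketch of the units table, assuming Bytes parses strings like '1GiB' and exposes the get_qty_in method used elsewhere in this commit (the import path is an assumption):

from common.bytes import Bytes  # import path assumed

size = Bytes('1GiB')
print size.get_qty_in('MiB')             # 1024
print str(size.get_qty_in('MiB')) + 'M'  # '1024M', the form qemu-img expects below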

View file

@@ -12,14 +12,14 @@ class LoopbackVolume(Volume):
def _before_create(self, e):
self.image_path = e.image_path
vol_size = str(self.size.get_qty_in('MiB')) + 'M'
log_check_call(['/usr/bin/qemu-img', 'create', '-f', 'raw', self.image_path, vol_size])
log_check_call(['qemu-img', 'create', '-f', 'raw', self.image_path, vol_size])
def _before_attach(self, e):
[self.loop_device_path] = log_check_call(['/sbin/losetup', '--show', '--find', self.image_path])
[self.loop_device_path] = log_check_call(['losetup', '--show', '--find', self.image_path])
self.device_path = self.loop_device_path
def _before_detach(self, e):
log_check_call(['/sbin/losetup', '--detach', self.loop_device_path])
log_check_call(['losetup', '--detach', self.loop_device_path])
del self.loop_device_path
del self.device_path

View file

@@ -9,7 +9,7 @@ class QEMUVolume(LoopbackVolume):
def _before_create(self, e):
self.image_path = e.image_path
vol_size = str(self.size.get_qty_in('MiB')) + 'M'
log_check_call(['/usr/bin/qemu-img', 'create', '-f', self.qemu_format, self.image_path, vol_size])
log_check_call(['qemu-img', 'create', '-f', self.qemu_format, self.image_path, vol_size])
def _check_nbd_module(self):
from base.fs.partitionmaps.none import NoPartitions
@@ -40,11 +40,11 @@ class QEMUVolume(LoopbackVolume):
def _before_attach(self, e):
self._check_nbd_module()
self.loop_device_path = self._find_free_nbd_device()
log_check_call(['/usr/bin/qemu-nbd', '--connect', self.loop_device_path, self.image_path])
log_check_call(['qemu-nbd', '--connect', self.loop_device_path, self.image_path])
self.device_path = self.loop_device_path
def _before_detach(self, e):
log_check_call(['/usr/bin/qemu-nbd', '--disconnect', self.loop_device_path])
log_check_call(['qemu-nbd', '--disconnect', self.loop_device_path])
del self.loop_device_path
del self.device_path

View file

@@ -12,19 +12,21 @@ from common.tasks import security
from common.tasks import locale
base_set = [workspace.CreateWorkspace,
host.HostDependencies,
host.CheckHostDependencies,
bootstrap.AddRequiredCommands,
host.CheckExternalCommands,
bootstrap.Bootstrap,
workspace.DeleteWorkspace,
]
volume_set = [volume.Attach,
volume.Detach,
filesystem.AddRequiredCommands,
filesystem.Format,
filesystem.FStab,
]
partitioning_set = [partitioning.PartitionVolume,
partitioning_set = [partitioning.AddRequiredCommands,
partitioning.PartitionVolume,
partitioning.MapPartitions,
partitioning.UnmapPartitions,
]
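A sketch of how a provider's resolve_tasks composes these sets, mirroring the ec2 provider changes later in this commit:

import common.task_sets

def resolve_tasks(taskset, manifest):
	taskset.update(common.task_sets.base_set)
	taskset.update(common.task_sets.volume_set)
	# Partitioning tasks are only needed when the volume is actually partitioned
	if manifest.volume['partitions']['type'] != 'none':
		taskset.update(common.task_sets.partitioning_set)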

View file

@@ -26,11 +26,11 @@ class AddDefaultSources(Task):
sections = 'main'
if 'sections' in info.manifest.system:
sections = ' '.join(info.manifest.system['sections'])
info.source_lists.add('main', 'deb {apt_mirror} {system.release} '+sections)
info.source_lists.add('main', 'deb-src {apt_mirror} {system.release} '+sections)
info.source_lists.add('main', 'deb {apt_mirror} {system.release} ' + sections)
info.source_lists.add('main', 'deb-src {apt_mirror} {system.release} ' + sections)
if info.manifest.system['release'] not in {'testing', 'unstable'}:
info.source_lists.add('main', 'deb {apt_mirror} {system.release}-updates '+sections)
info.source_lists.add('main', 'deb-src {apt_mirror} {system.release}-updates '+sections)
info.source_lists.add('main', 'deb {apt_mirror} {system.release}-updates ' + sections)
info.source_lists.add('main', 'deb-src {apt_mirror} {system.release}-updates ' + sections)
class InstallTrustedKeys(Task):
@@ -87,8 +87,8 @@ class AptUpdate(Task):
@classmethod
def run(cls, info):
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/bin/apt-get', 'update'])
log_check_call(['chroot', info.root,
'apt-get', 'update'])
class AptUpgrade(Task):
@@ -100,15 +100,15 @@ class AptUpgrade(Task):
def run(cls, info):
from subprocess import CalledProcessError
try:
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/bin/apt-get', 'install',
'--fix-broken',
'--no-install-recommends',
'--assume-yes'])
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/bin/apt-get', 'upgrade',
'--no-install-recommends',
'--assume-yes'])
log_check_call(['chroot', info.root,
'apt-get', 'install',
'--fix-broken',
'--no-install-recommends',
'--assume-yes'])
log_check_call(['chroot', info.root,
'apt-get', 'upgrade',
'--no-install-recommends',
'--assume-yes'])
except CalledProcessError as e:
if e.returncode == 100:
import logging
@@ -125,9 +125,9 @@ class PurgeUnusedPackages(Task):
@classmethod
def run(cls, info):
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/bin/apt-get', 'autoremove',
'--purge'])
log_check_call(['chroot', info.root,
'apt-get', 'autoremove',
'--purge'])
class AptClean(Task):
@@ -136,8 +136,8 @@ class AptClean(Task):
@classmethod
def run(cls, info):
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/bin/apt-get', 'clean'])
log_check_call(['chroot', info.root,
'apt-get', 'clean'])
lists = os.path.join(info.root, 'var/lib/apt/lists')
for list_file in [os.path.join(lists, f) for f in os.listdir(lists)]:

View file

@@ -91,9 +91,9 @@ class InstallGrub(Task):
idx=idx + 1))
# Install grub
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/sbin/grub-install', device_path])
log_check_call(['/usr/sbin/chroot', info.root, '/usr/sbin/update-grub'])
log_check_call(['chroot', info.root,
'grub-install', device_path])
log_check_call(['chroot', info.root, 'update-grub'])
except Exception as e:
if isinstance(info.volume, LoopbackVolume):
remount(info.volume, unlink_fn)
@@ -127,12 +127,12 @@ class InstallExtLinux(Task):
bootloader = '/usr/lib/syslinux/gptmbr.bin'
else:
bootloader = '/usr/lib/extlinux/mbr.bin'
log_check_call(['/usr/sbin/chroot', info.root,
'/bin/dd', 'bs=440', 'count=1',
log_check_call(['chroot', info.root,
'dd', 'bs=440', 'count=1',
'if=' + bootloader,
'of=' + info.volume.device_path])
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/bin/extlinux',
log_check_call(['chroot', info.root,
'extlinux',
'--install', '/boot/extlinux'])
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/sbin/extlinux-update'])
log_check_call(['chroot', info.root,
'extlinux-update'])

View file

@@ -1,12 +1,23 @@
from base import Task
from common import phases
from common.exceptions import TaskError
import host
import logging
log = logging.getLogger(__name__)
class AddRequiredCommands(Task):
description = 'Adding commands required for bootstrapping Debian'
phase = phases.preparation
successors = [host.CheckExternalCommands]
@classmethod
def run(cls, info):
info.host_dependencies['debootstrap'] = 'debootstrap'
def get_bootstrap_args(info):
executable = ['/usr/sbin/debootstrap']
executable = ['debootstrap']
options = ['--arch=' + info.manifest.system['architecture']]
if len(info.include_packages) > 0:
options.append('--include=' + ','.join(info.include_packages))

View file

@@ -29,7 +29,7 @@ class ShredHostkeys(Task):
public = [path + '.pub' for path in private]
from common.tools import log_check_call
log_check_call(['/usr/bin/shred', '--remove'] + private + public)
log_check_call(['shred', '--remove'] + private + public)
class CleanTMP(Task):

View file

@@ -2,10 +2,22 @@ from base import Task
from common import phases
from common.tools import log_check_call
from bootstrap import Bootstrap
from common.tasks import apt
import apt
import host
import volume
class AddRequiredCommands(Task):
description = 'Adding commands required for formatting the partitions'
phase = phases.preparation
successors = [host.CheckExternalCommands]
@classmethod
def run(cls, info):
if 'xfs' in (p.filesystem for p in info.volume.partition_map.partitions):
info.host_dependencies['mkfs.xfs'] = 'xfsprogs'
class Format(Task):
description = 'Formatting the volume'
phase = phases.volume_preparation
@@ -31,7 +43,7 @@ class TuneVolumeFS(Task):
for partition in info.volume.partition_map.partitions:
if not isinstance(partition, UnformattedPartition):
if re.match('^ext[2-4]$', partition.filesystem) is not None:
log_check_call(['/sbin/tune2fs', '-i', '0', partition.device_path])
log_check_call(['tune2fs', '-i', '0', partition.device_path])
class AddXFSProgs(Task):

View file

@@ -3,47 +3,29 @@ from common import phases
from common.exceptions import TaskError
class HostDependencies(Task):
description = 'Determining required host dependencies'
class CheckExternalCommands(Task):
description = 'Checking availability of external commands'
phase = phases.preparation
@classmethod
def run(cls, info):
info.host_dependencies.add('debootstrap')
from common.fs.loopbackvolume import LoopbackVolume
if isinstance(info.volume, LoopbackVolume):
info.host_dependencies.add('qemu-utils')
if 'xfs' in (p.filesystem for p in info.volume.partition_map.partitions):
info.host_dependencies.add('xfsprogs')
from base.fs.partitionmaps.none import NoPartitions
if not isinstance(info.volume.partition_map, NoPartitions):
info.host_dependencies.update(['parted', 'kpartx'])
class CheckHostDependencies(Task):
description = 'Checking installed host packages'
phase = phases.preparation
predecessors = [HostDependencies]
@classmethod
def run(cls, info):
from common.tools import log_check_call
from subprocess import CalledProcessError
import re
missing_packages = []
for package in info.host_dependencies:
for command, package in info.host_dependencies.items():
try:
import os.path
if os.path.isfile('/usr/bin/dpkg-query'):
log_check_call(['/usr/bin/dpkg-query', '-s', package])
log_check_call(['type ' + command], shell=True)
except CalledProcessError:
missing_packages.append(package)
if re.match('^https?:\/\/', package):
msg = ('The command `{command}\' is not available, '
'you can download the software at `{package}\'.'
.format(command=command, package=package))
else:
msg = ('The command `{command}\' is not available, '
'it is located in the package `{package}\'.'
.format(command=command, package=package))
missing_packages.append(msg)
if len(missing_packages) > 0:
pkgs = '\', `'.join(missing_packages)
if len(missing_packages) > 1:
msg = "The packages `{packages}\' are not installed".format(packages=pkgs)
else:
msg = "The package `{packages}\' is not installed".format(packages=pkgs)
msg = '\n'.join(missing_packages)
raise TaskError(msg)
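The switch from dpkg-query to the shell builtin `type' means availability is now checked per command rather than per package, so tools installed outside of dpkg also pass. A standalone sketch of the same check:

import os
import subprocess

def command_exists(command):
	# `type' is a shell builtin, hence shell=True
	with open(os.devnull, 'w') as devnull:
		status = subprocess.call('type ' + command, shell=True,
		                         stdout=devnull, stderr=devnull)
	return status == 0

print command_exists('debootstrap')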

View file

@@ -21,10 +21,10 @@ class InstallInitScripts(Task):
dst = os.path.join(info.root, 'etc/init.d', name)
copy(src, dst)
os.chmod(dst, rwxr_xr_x)
log_check_call(['/usr/sbin/chroot', info.root, '/sbin/insserv', '--default', name])
log_check_call(['chroot', info.root, 'insserv', '--default', name])
for name in info.initd['disable']:
log_check_call(['/usr/sbin/chroot', info.root, '/sbin/insserv', '--remove', name])
log_check_call(['chroot', info.root, 'insserv', '--remove', name])
class AddExpandRoot(Task):
@@ -49,8 +49,8 @@ class AddSSHKeyGeneration(Task):
install = info.initd['install']
from subprocess import CalledProcessError
try:
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/bin/dpkg-query', '-W', 'openssh-server'])
log_check_call(['chroot', info.root,
'dpkg-query', '-W', 'openssh-server'])
if info.manifest.system['release'] == 'squeeze':
install['generate-ssh-hostkeys'] = os.path.join(init_scripts_dir, 'squeeze/generate-ssh-hostkeys')
else:

View file

@@ -28,12 +28,12 @@ class GenerateLocale(Task):
search = '# ' + locale_str
sed_i(locale_gen, search, locale_str)
log_check_call(['/usr/sbin/chroot', info.root, '/usr/sbin/locale-gen'])
log_check_call(['chroot', info.root, 'locale-gen'])
lang = '{locale}.{charmap}'.format(locale=info.manifest.system['locale'],
charmap=info.manifest.system['charmap'])
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/sbin/update-locale', 'LANG=' + lang])
log_check_call(['chroot', info.root,
'update-locale', 'LANG=' + lang])
class SetTimezone(Task):

View file

@@ -1,8 +1,25 @@
from base import Task
from common import phases
import host
import volume
class AddRequiredCommands(Task):
description = 'Adding commands required for creating loopback volumes'
phase = phases.preparation
successors = [host.CheckExternalCommands]
@classmethod
def run(cls, info):
from common.fs.loopbackvolume import LoopbackVolume
if isinstance(info.volume, LoopbackVolume):
info.host_dependencies['qemu-img'] = 'qemu-utils'
info.host_dependencies['losetup'] = 'mount'
from common.fs.qemuvolume import QEMUVolume
if isinstance(info.volume, QEMUVolume):
info.host_dependencies['losetup'] = 'mount'
class Create(Task):
description = 'Creating a loopback volume'
phase = phases.volume_creation

View file

@@ -1,19 +1,12 @@
// This is a mapping of Debian release codenames to NIC configurations
// Every item in an array is a line
{
"squeeze": [
"auto lo",
"iface lo inet loopback",
"auto eth0",
"iface eth0 inet dhcp" ],
"wheezy": [
"auto eth0",
"iface eth0 inet dhcp" ],
"jessie": [
"auto eth0",
"iface eth0 inet dhcp" ],
"testing": [
"auto eth0",
"iface eth0 inet dhcp" ],
"unstable": [
"auto eth0",
"iface eth0 inet dhcp" ]
"squeeze": ["auto lo",
"iface lo inet loopback",
"auto eth0",
"iface eth0 inet dhcp"],
"wheezy": ["auto eth0",
"iface eth0 inet dhcp"],
"jessie": ["auto eth0",
"iface eth0 inet dhcp"]
}

View file

@@ -1,6 +1,6 @@
from base import Task
from common import phases
import os.path
import os
class RemoveDNSInfo(Task):
@@ -9,10 +9,8 @@ class RemoveDNSInfo(Task):
@classmethod
def run(cls, info):
from os import remove
import os.path
if os.path.isfile(os.path.join(info.root, 'etc/resolv.conf')):
remove(os.path.join(info.root, 'etc/resolv.conf'))
os.remove(os.path.join(info.root, 'etc/resolv.conf'))
class RemoveHostname(Task):
@@ -21,10 +19,8 @@ class RemoveHostname(Task):
@classmethod
def run(cls, info):
from os import remove
import os.path
if os.path.isfile(os.path.join(info.root, 'etc/hostname')):
remove(os.path.join(info.root, 'etc/hostname'))
os.remove(os.path.join(info.root, 'etc/hostname'))
class ConfigureNetworkIF(Task):
@@ -33,10 +29,10 @@ class ConfigureNetworkIF(Task):
@classmethod
def run(cls, info):
network_config_path = os.path.join(os.path.dirname(__file__), 'network-configuration.json')
from common.tools import config_get
if_config = config_get(network_config_path, [info.release_codename])
interfaces_path = os.path.join(info.root, 'etc/network/interfaces')
if_config = []
with open('common/tasks/network-configuration.json') as stream:
import json
if_config = json.loads(stream.read())
with open(interfaces_path, 'a') as interfaces:
interfaces.write('\n'.join(if_config.get(info.manifest.system['release'])) + '\n')
interfaces.write('\n'.join(if_config) + '\n')

View file

@@ -45,10 +45,10 @@ class InstallPackages(Task):
try:
env = os.environ.copy()
env['DEBIAN_FRONTEND'] = 'noninteractive'
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/bin/apt-get', 'install',
'--no-install-recommends',
'--assume-yes']
log_check_call(['chroot', info.root,
'apt-get', 'install',
'--no-install-recommends',
'--assume-yes']
+ map(str, remote_packages),
env=env)
except CalledProcessError as e:
@@ -90,8 +90,8 @@ class InstallPackages(Task):
env = os.environ.copy()
env['DEBIAN_FRONTEND'] = 'noninteractive'
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/bin/dpkg', '--install']
log_check_call(['chroot', info.root,
'dpkg', '--install']
+ chrooted_package_paths,
env=env)

View file

@@ -1,9 +1,23 @@
from base import Task
from common import phases
import filesystem
import host
import volume
class AddRequiredCommands(Task):
description = 'Adding commands required for partitioning the volume'
phase = phases.preparation
successors = [host.CheckExternalCommands]
@classmethod
def run(cls, info):
from base.fs.partitionmaps.none import NoPartitions
if not isinstance(info.volume.partition_map, NoPartitions):
info.host_dependencies['parted'] = 'parted'
info.host_dependencies['kpartx'] = 'kpartx'
class PartitionVolume(Task):
description = 'Partitioning the volume'
phase = phases.volume_preparation

View file

@@ -10,7 +10,7 @@ class EnableShadowConfig(Task):
@classmethod
def run(cls, info):
from common.tools import log_check_call
log_check_call(['/usr/sbin/chroot', info.root, '/sbin/shadowconfig', 'on'])
log_check_call(['chroot', info.root, 'shadowconfig', 'on'])
class DisableSSHPasswordAuthentication(Task):

View file

@@ -1,14 +1,14 @@
def log_check_call(command, stdin=None, env=None):
status, stdout, stderr = log_call(command, stdin, env)
def log_check_call(command, stdin=None, env=None, shell=False):
status, stdout, stderr = log_call(command, stdin, env, shell)
if status != 0:
from subprocess import CalledProcessError
raise CalledProcessError(status, ' '.join(command), '\n'.join(stderr))
return stdout
def log_call(command, stdin=None, env=None):
def log_call(command, stdin=None, env=None, shell=False):
import subprocess
import select
@@ -22,6 +22,7 @@ def log_call(command, stdin=None, env=None):
popen_args = {'args': command,
'env': env,
'shell': shell,
'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE, }
@@ -56,3 +57,17 @@ def sed_i(file_path, pattern, subst):
import re
for line in fileinput.input(files=file_path, inplace=True):
print re.sub(pattern, subst, line),
def load_json(path):
import json
from minify_json import json_minify
with open(path) as stream:
return json.loads(json_minify(stream.read(), False))
def config_get(path, config_path):
config = load_json(path)
for key in config_path:
config = config.get(key)
return config
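A usage sketch of the two helpers above, looking up the wheezy/amd64 kernel package in the commented JSON mapping this commit adds to the ec2 provider:

from common.tools import config_get

kernel_package = config_get('providers/ec2/tasks/packages-kernels.json',
                            ['wheezy', 'amd64'])
print kernel_package  # linux-image-amd64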

View file

@@ -2,20 +2,20 @@
"provider": "kvm",
"bootstrapper": {
"workspace": "/target",
"mirror": "http://ftp.fr.debian.org/debian/",
"virtio" : [ "virtio_pci", "virtio_blk" ]
"mirror": "http://ftp.fr.debian.org/debian/"
},
"image": {
"name": "debian-{system.release}-{system.architecture}-{%y}{%m}{%d}",
"description": "Debian {system.release} {system.architecture}"
},
"system": {
"release": "wheezy",
"architecture": "amd64",
"bootloader": "grub",
"timezone": "UTC",
"locale": "en_US",
"charmap": "UTF-8"
"release": "wheezy",
"architecture": "amd64",
"bootloader": "grub",
"timezone": "UTC",
"locale": "en_US",
"charmap": "UTF-8",
"virtio_modules": [ "virtio_pci", "virtio_blk" ]
},
"packages": {},
"volume": {

View file

@@ -22,8 +22,8 @@ class CreateAdminUser(Task):
@classmethod
def run(cls, info):
from common.tools import log_check_call
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/sbin/useradd',
log_check_call(['chroot', info.root,
'useradd',
'--create-home', '--shell', '/bin/bash',
info.manifest.plugins['admin_user']['username']])
@@ -65,8 +65,8 @@ class DisableRootLogin(Task):
from subprocess import CalledProcessError
from common.tools import log_check_call
try:
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/bin/dpkg-query', '-W', 'openssh-server'])
log_check_call(['chroot', info.root,
'dpkg-query', '-W', 'openssh-server'])
from common.tools import sed_i
sshdconfig_path = os.path.join(info.root, 'etc/ssh/sshd_config')
sed_i(sshdconfig_path, 'PermitRootLogin yes', 'PermitRootLogin no')

View file

@@ -68,7 +68,7 @@ class SetMetadataSource(Task):
logging.getLogger(__name__).warn(msg)
return
sources = "cloud-init cloud-init/datasources multiselect " + sources
log_check_call(['/usr/sbin/chroot', info.root, '/usr/bin/debconf-set-selections'], sources)
log_check_call(['chroot', info.root, 'debconf-set-selections'], sources)
class DisableModules(Task):

View file

@@ -5,9 +5,6 @@ def validate_manifest(data, validator, error):
import os.path
schema_path = os.path.join(os.path.dirname(__file__), 'manifest-schema.json')
validator(data, schema_path)
if 'zerofree' in data['plugins']['minimize_size']:
zerofree_schema_path = os.path.join(os.path.dirname(__file__), 'manifest-schema-zerofree.json')
validator(data, zerofree_schema_path)
if data['plugins']['minimize_size'].get('shrink', False) and data['volume']['backing'] != 'vmdk':
error('Can only shrink vmdk images', ['plugins', 'minimize_size', 'shrink'])
@@ -16,11 +13,11 @@ def resolve_tasks(taskset, manifest):
taskset.update([tasks.AddFolderMounts,
tasks.RemoveFolderMounts,
])
if 'zerofree' in manifest.plugins['minimize_size']:
taskset.add(tasks.CheckZerofreePath)
if manifest.plugins['minimize_size'].get('zerofree', False):
taskset.add(tasks.AddRequiredCommands)
taskset.add(tasks.Zerofree)
if manifest.plugins['minimize_size'].get('shrink', False):
taskset.add(tasks.CheckVMWareDMCommand)
taskset.add(tasks.AddRequiredCommands)
taskset.add(tasks.ShrinkVolume)

View file

@@ -1,18 +0,0 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Minimize size plugin manifest",
"type": "object",
"properties": {
"volume": {
"type": "object",
"properties": {
"partitions": {
"type": "object",
"properties": {
"type": { "enum": ["none"] }
}
}
}
}
}
}

View file

@@ -10,16 +10,10 @@
"type": "object",
"properties": {
"shrink": { "type": "boolean" },
"zerofree": { "$ref": "#/definitions/absolute_path" }
"zerofree": { "type": "boolean" }
}
}
}
}
},
"definitions": {
"absolute_path": {
"type": "string",
"pattern": "^/[^\\0]+$"
}
}
}

View file

@@ -3,6 +3,7 @@ from common import phases
from common.tasks import apt
from common.tasks import bootstrap
from common.tasks import filesystem
from common.tasks import host
from common.tasks import partitioning
from common.tasks import volume
import os
@@ -46,48 +47,30 @@ class RemoveFolderMounts(Task):
del info.minimize_size_folder
class CheckZerofreePath(Task):
description = 'Checking path to zerofree tool'
class AddRequiredCommands(Task):
description = 'Adding commands required for reducing volume size'
phase = phases.preparation
successors = [host.CheckExternalCommands]
@classmethod
def run(cls, info):
from common.exceptions import TaskError
import os
zerofree = info.manifest.plugins['minimize_size']['zerofree']
if not os.path.isfile(zerofree):
raise TaskError('The path `{path}\' does not exist or is not a file'.format(path=zerofree))
if not os.access(zerofree, os.X_OK):
raise TaskError('The path `{path}\' is not executable'.format(path=zerofree))
if info.manifest.plugins['minimize_size'].get('zerofree', False):
info.host_dependencies['zerofree'] = 'zerofree'
if info.manifest.plugins['minimize_size'].get('shrink', False):
link = 'https://my.vmware.com/web/vmware/info/slug/desktop_end_user_computing/vmware_workstation/10_0'
info.host_dependencies['vmware-vdiskmanager'] = link
# Get zerofree here: http://intgat.tigress.co.uk/rmy/uml/index.html
class Zerofree(Task):
description = 'Zeroing unused blocks on the volume'
description = 'Zeroing unused blocks on the root partition'
phase = phases.volume_unmounting
predecessors = [filesystem.UnmountRoot, partitioning.UnmapPartitions]
successors = [volume.Detach]
predecessors = [filesystem.UnmountRoot]
successors = [partitioning.UnmapPartitions, volume.Detach]
@classmethod
def run(cls, info):
from common.tools import log_check_call
zerofree = info.manifest.plugins['minimize_size']['zerofree']
log_check_call([zerofree, info.volume.device_path])
class CheckVMWareDMCommand(Task):
description = 'Checking path to vmware-vdiskmanager tool'
phase = phases.preparation
@classmethod
def run(cls, info):
from common.exceptions import TaskError
import os
vdiskmngr = '/usr/bin/vmware-vdiskmanager'
if not os.path.isfile(vdiskmngr):
raise TaskError('Unable to find vmware-vdiskmanager at `{path}\''.format(path=vdiskmngr))
if not os.access(vdiskmngr, os.X_OK):
raise TaskError('vmware-vdiskmanager at `{path}\' is not executable'.format(path=vdiskmngr))
log_check_call(['zerofree', info.volume.partition_map.root.device_path])
class ShrinkVolume(Task):

View file

@@ -8,9 +8,10 @@ def validate_manifest(data, validator, error):
def resolve_tasks(taskset, manifest):
taskset.add(tasks.CheckPaths)
taskset.add(tasks.AddPackages)
if 'assets' in manifest.plugins['puppet']:
taskset.add(tasks.CheckAssetsPath)
taskset.add(tasks.CopyPuppetAssets)
if 'manifest' in manifest.plugins['puppet']:
taskset.add(tasks.CheckManifestPath)
taskset.add(tasks.ApplyPuppetManifest)

View file

@@ -5,8 +5,8 @@ from common.tasks import network
import os
class CheckPaths(Task):
description = 'Checking whether manifest and assets paths exist'
class CheckAssetsPath(Task):
description = 'Checking whether the assets path exists'
phase = phases.preparation
@classmethod
@@ -20,6 +20,14 @@ class CheckPaths(Task):
msg = 'The assets path {assets} does not point to a directory.'.format(assets=assets)
raise TaskError(msg)
class CheckManifestPath(Task):
description = 'Checking whether the manifest path exists'
phase = phases.preparation
@classmethod
def run(cls, info):
from common.exceptions import TaskError
manifest = info.manifest.plugins['puppet']['manifest']
if not os.path.exists(manifest):
msg = 'The manifest file {manifest} does not exist.'.format(manifest=manifest)
@@ -84,10 +92,20 @@ class ApplyPuppetManifest(Task):
manifest_path = os.path.join('/', manifest_rel_dst)
from common.tools import log_check_call
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/bin/puppet', 'apply', manifest_path])
log_check_call(['chroot', info.root,
'puppet', 'apply', manifest_path])
os.remove(manifest_dst)
from common.tools import sed_i
hosts_path = os.path.join(info.root, 'etc/hosts')
sed_i(hosts_path, '127.0.0.1\s*{hostname}\n?'.format(hostname=hostname), '')
class EnableAgent(Task):
description = 'Enabling the puppet agent'
phase = phases.system_modification
@classmethod
def run(cls, info):
from common.tools import sed_i
puppet_defaults = os.path.join(info.root, 'etc/default/puppet')
sed_i(puppet_defaults, 'START=no', 'START=yes')

View file

@@ -9,5 +9,5 @@ class SetRootPassword(Task):
@classmethod
def run(cls, info):
from common.tools import log_check_call
log_check_call(['/usr/sbin/chroot', info.root, '/usr/sbin/chpasswd'],
log_check_call(['chroot', info.root, 'chpasswd'],
'root:' + info.manifest.plugins['root_password']['password'])
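Note that the second positional argument of log_check_call is data for the command's stdin; that is how chpasswd receives the user:password line without a shell pipe. A sketch with hypothetical values:

from common.tools import log_check_call

# 'root:s3cr3t' is written to chpasswd's stdin; the root path and password are made up
log_check_call(['chroot', '/target', 'chpasswd'], 'root:s3cr3t')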

View file

@@ -71,8 +71,8 @@ class CreateVagrantUser(Task):
@classmethod
def run(cls, info):
from common.tools import log_check_call
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/sbin/useradd',
log_check_call(['chroot', info.root,
'useradd',
'--create-home', '--shell', '/bin/bash',
'vagrant'])
@@ -115,8 +115,8 @@ class AddInsecurePublicKey(Task):
# We can't do this directly with python, since getpwnam gets its info from the host
from common.tools import log_check_call
log_check_call(['/usr/sbin/chroot', info.root,
'/bin/chown', 'vagrant:vagrant',
log_check_call(['chroot', info.root,
'chown', 'vagrant:vagrant',
'/home/vagrant/.ssh', '/home/vagrant/.ssh/authorized_keys'])
@@ -127,7 +127,7 @@ class SetRootPassword(Task):
@classmethod
def run(cls, info):
from common.tools import log_check_call
log_check_call(['/usr/sbin/chroot', info.root, '/usr/sbin/chpasswd'], 'root:vagrant')
log_check_call(['chroot', info.root, 'chpasswd'], 'root:vagrant')
class PackageBox(Task):

View file

@@ -61,7 +61,7 @@ def resolve_tasks(taskset, manifest):
if manifest.volume['partitions']['type'] != 'none':
taskset.update(common.task_sets.partitioning_set)
taskset.update([tasks.host.HostDependencies,
taskset.update([tasks.host.AddExternalCommands,
tasks.packages.DefaultPackages,
tasks.connection.GetCredentials,
tasks.host.GetInfo,
@@ -97,7 +97,8 @@ def resolve_tasks(taskset, manifest):
tasks.ebs.Attach,
filesystem.FStab,
tasks.ebs.Snapshot],
's3': [loopback.Create,
's3': [loopback.AddRequiredCommands,
loopback.Create,
volume.Attach,
tasks.filesystem.S3FStab,
tasks.ami.BundleImage,

View file

@@ -0,0 +1,34 @@
// This is a mapping of EC2 regions to processor architectures to Amazon Kernel Images
// Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html#AmazonKernelImageIDs
{
"ap-northeast-1": // Asia Pacific (Tokyo) Region
{"i386": "aki-136bf512", // pv-grub-hd0_1.04-i386.gz
"amd64": "aki-176bf516"}, // pv-grub-hd0_1.04-x86_64.gz
"ap-southeast-1": // Asia Pacific (Singapore) Region
{"i386": "aki-ae3973fc", // pv-grub-hd0_1.04-i386.gz
"amd64": "aki-503e7402"}, // pv-grub-hd0_1.04-x86_64.gz
"ap-southeast-2": // Asia Pacific (Sydney) Region
{"i386": "aki-cd62fff7", // pv-grub-hd0_1.04-i386.gz
"amd64": "aki-c362fff9"}, // pv-grub-hd0_1.04-x86_64.gz
"eu-west-1": // EU (Ireland) Region
{"i386": "aki-68a3451f", // pv-grub-hd0_1.04-i386.gz
"amd64": "aki-52a34525"}, // pv-grub-hd0_1.04-x86_64.gz
"sa-east-1": // South America (Sao Paulo) Region
{"i386": "aki-5b53f446", // pv-grub-hd0_1.04-i386.gz
"amd64": "aki-5553f448"}, // pv-grub-hd0_1.04-x86_64.gz
"us-east-1": // US East (Northern Virginia) Region
{"i386": "aki-8f9dcae6", // pv-grub-hd0_1.04-i386.gz
"amd64": "aki-919dcaf8"}, // pv-grub-hd0_1.04-x86_64.gz
"us-gov-west-1": // AWS GovCloud (US)
{"i386": "aki-1fe98d3c", // pv-grub-hd0_1.04-i386.gz
"amd64": "aki-1de98d3e"}, // pv-grub-hd0_1.04-x86_64.gz
"us-west-1": // US West (Northern California) Region
{"i386": "aki-8e0531cb", // pv-grub-hd0_1.04-i386.gz
"amd64": "aki-880531cd"}, // pv-grub-hd0_1.04-x86_64.gz
"us-west-2": // US West (Oregon) Region
{"i386": "aki-f08f11c0", // pv-grub-hd0_1.04-i386.gz
"amd64": "aki-fc8f11cc"}, // pv-grub-hd0_1.04-x86_64.gz
"cn-north-1":// China North (Beijing) Region
{"i386": "aki-908f1da9", // pv-grub-hd0_1.04-i386.gz
"amd64": "aki-9e8f1da7"} // pv-grub-hd0_1.04-x86_64.gz
}

View file

@@ -38,8 +38,10 @@ class BundleImage(Task):
def run(cls, info):
bundle_name = 'bundle-{id}'.format(id=info.run_id)
info.bundle_path = os.path.join(info.workspace, bundle_name)
log_check_call(['/usr/bin/euca-bundle-image',
arch = {'i386': 'i386', 'amd64': 'x86_64'}.get(info.manifest.system['architecture'])
log_check_call(['euca-bundle-image',
'--image', info.volume.image_path,
'--arch', arch,
'--user', info.credentials['user-id'],
'--privatekey', info.credentials['private-key'],
'--cert', info.credentials['certificate'],
@@ -63,7 +65,7 @@ class UploadImage(Task):
else:
s3_url = 'https://s3-{region}.amazonaws.com/'.format(region=info.host['region'])
info.manifest.manifest_location = info.manifest.image['bucket'] + '/' + info.ami_name + '.manifest.xml'
log_check_call(['/usr/bin/euca-upload-bundle',
log_check_call(['euca-upload-bundle',
'--bucket', info.manifest.image['bucket'],
'--manifest', manifest_file,
'--access-key', info.credentials['access-key'],
@@ -90,48 +92,6 @@ class RegisterAMI(Task):
phase = phases.image_registration
predecessors = [Snapshot, UploadImage]
# Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html#AmazonKernelImageIDs
kernel_mapping = {'ap-northeast-1': # Asia Pacific (Tokyo) Region
{'i386': 'aki-136bf512', # pv-grub-hd0_1.04-i386.gz
'amd64': 'aki-176bf516' # pv-grub-hd0_1.04-x86_64.gz
},
'ap-southeast-1': # Asia Pacific (Singapore) Region
{'i386': 'aki-ae3973fc', # pv-grub-hd0_1.04-i386.gz
'amd64': 'aki-503e7402' # pv-grub-hd0_1.04-x86_64.gz
},
'ap-southeast-2': # Asia Pacific (Sydney) Region
{'i386': 'aki-cd62fff7', # pv-grub-hd0_1.04-i386.gz
'amd64': 'aki-c362fff9' # pv-grub-hd0_1.04-x86_64.gz
},
'eu-west-1': # EU (Ireland) Region
{'i386': 'aki-68a3451f', # pv-grub-hd0_1.04-i386.gz
'amd64': 'aki-52a34525' # pv-grub-hd0_1.04-x86_64.gz
},
'sa-east-1': # South America (Sao Paulo) Region
{'i386': 'aki-5b53f446', # pv-grub-hd0_1.04-i386.gz
'amd64': 'aki-5553f448' # pv-grub-hd0_1.04-x86_64.gz
},
'us-east-1': # US East (Northern Virginia) Region
{'i386': 'aki-8f9dcae6', # pv-grub-hd0_1.04-i386.gz
'amd64': 'aki-919dcaf8' # pv-grub-hd0_1.04-x86_64.gz
},
'us-gov-west-1': # AWS GovCloud (US)
{'i386': 'aki-1fe98d3c', # pv-grub-hd0_1.04-i386.gz
'amd64': 'aki-1de98d3e' # pv-grub-hd0_1.04-x86_64.gz
},
'us-west-1': # US West (Northern California) Region
{'i386': 'aki-8e0531cb', # pv-grub-hd0_1.04-i386.gz
'amd64': 'aki-880531cd' # pv-grub-hd0_1.04-x86_64.gz
},
'us-west-2': # US West (Oregon) Region
{'i386': 'aki-f08f11c0', # pv-grub-hd0_1.04-i386.gz
'amd64': 'aki-fc8f11cc' # pv-grub-hd0_1.04-x86_64.gz
},
'cn-north-1': # China North (Beijing) Region
{'i386': 'aki-908f1da9', # pv-grub-hd0_1.04-i386.gz
'amd64': 'aki-9e8f1da7' # pv-grub-hd0_1.04-x86_64.gz
}
}
@classmethod
def run(cls, info):
registration_params = {'name': info.ami_name,
@@ -140,17 +100,11 @@ class RegisterAMI(Task):
'amd64': 'x86_64'}.get(info.manifest.system['architecture'])
if info.manifest.volume['backing'] == 's3':
grub_boot_device = 'hd0'
registration_params['image_location'] = info.manifest.manifest_location
else:
root_dev_name = {'pvm': '/dev/sda',
'hvm': '/dev/xvda'}.get(info.manifest.data['virtualization'])
registration_params['root_device_name'] = root_dev_name
from base.fs.partitionmaps.none import NoPartitions
if isinstance(info.volume.partition_map, NoPartitions):
grub_boot_device = 'hd0'
else:
grub_boot_device = 'hd00'
from boto.ec2.blockdevicemapping import BlockDeviceType
from boto.ec2.blockdevicemapping import BlockDeviceMapping
@@ -163,8 +117,9 @@ class RegisterAMI(Task):
registration_params['virtualization_type'] = 'hvm'
else:
registration_params['virtualization_type'] = 'paravirtual'
registration_params['kernel_id'] = (cls.kernel_mapping
.get(info.host['region'])
.get(info.manifest.system['architecture']))
akis_path = os.path.join(os.path.dirname(__file__), 'akis.json')
from common.tools import config_get
registration_params['kernel_id'] = config_get(akis_path, [info.host['region'],
info.manifest.system['architecture']])
info.image = info.connection.register_image(**registration_params)

View file

@@ -45,6 +45,6 @@ class ConfigurePVGrub(Task):
'GRUB_HIDDEN_TIMEOUT=true')
from common.tools import log_check_call
log_check_call(['/usr/sbin/chroot', info.root, '/usr/sbin/update-grub'])
log_check_call(['/usr/sbin/chroot', info.root,
'/bin/ln', '--symbolic', '/boot/grub/grub.cfg', '/boot/grub/menu.lst'])
log_check_call(['chroot', info.root, 'update-grub'])
log_check_call(['chroot', info.root,
'ln', '--symbolic', '/boot/grub/grub.cfg', '/boot/grub/menu.lst'])

View file

@@ -3,15 +3,16 @@ from common import phases
from common.tasks import host
class HostDependencies(Task):
description = 'Adding required host packages for EC2 bootstrapping'
class AddExternalCommands(Task):
description = 'Determining required external commands for EC2 bootstrapping'
phase = phases.preparation
successors = [host.CheckHostDependencies]
successors = [host.CheckExternalCommands]
@classmethod
def run(cls, info):
if info.manifest.volume['backing'] == 's3':
info.host_dependencies.add('euca2ools')
info.host_dependencies['euca-bundle-image'] = 'euca2ools'
info.host_dependencies['euca-upload-bundle'] = 'euca2ools'
class GetInfo(Task):

View file

@@ -40,17 +40,17 @@ class InstallEnhancedNetworking(Task):
urllib.urlretrieve(drivers_url, archive)
from common.tools import log_check_call
log_check_call('/bin/tar', '--ungzip',
'--extract',
'--file', archive,
'--directory', os.path.join(info.root, 'tmp'))
log_check_call(['tar', '--ungzip',
'--extract',
'--file', archive,
'--directory', os.path.join(info.root, 'tmp')])
src_dir = os.path.join('/tmp', os.path.basename(drivers_url), 'src')
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/bin/make', '--directory', src_dir])
log_check_call(['/usr/sbin/chroot', info.root,
'/usr/bin/make', 'install',
'--directory', src_dir])
log_check_call(['chroot', info.root,
'make', '--directory', src_dir])
log_check_call(['chroot', info.root,
'make', 'install',
'--directory', src_dir])
ixgbevf_conf_path = os.path.join(info.root, 'etc/modprobe.d/ixgbevf.conf')
with open(ixgbevf_conf_path, 'w') as ixgbevf_conf:

View file

@@ -1,17 +1,12 @@
// This is a mapping of Debian release codenames to processor architectures to kernel packages
{
"squeeze": {
"amd64": "linux-image-xen-amd64",
"i386" : "linux-image-xen-686" },
"wheezy": {
"amd64": "linux-image-amd64",
"i386" : "linux-image-686" },
"jessie": {
"amd64": "linux-image-amd64",
"i386" : "linux-image-686" },
"testing": {
"amd64": "linux-image-amd64",
"i386" : "linux-image-686" },
"unstable": {
"amd64": "linux-image-amd64",
"i386" : "linux-image-686" }
"squeeze": // In squeeze, we need a special kernel flavor for xen
{"i386": "linux-image-xen-686",
"amd64": "linux-image-xen-amd64"},
"wheezy":
{"i386": "linux-image-686",
"amd64": "linux-image-amd64"},
"jessie":
{"i386": "linux-image-686",
"amd64": "linux-image-amd64"}
}

View file

@@ -17,10 +17,9 @@ class DefaultPackages(Task):
info.exclude_packages.add('isc-dhcp-client')
info.exclude_packages.add('isc-dhcp-common')
# In squeeze, we need a special kernel flavor for xen
kernels = {}
with open('providers/ec2/tasks/packages-kernels.json') as stream:
import json
kernels = json.loads(stream.read())
kernel_package = kernels.get(info.manifest.system['release']).get(info.manifest.system['architecture'])
import os.path
kernel_packages_path = os.path.join(os.path.dirname(__file__), 'packages-kernels.json')
from common.tools import config_get
kernel_package = config_get(kernel_packages_path, [info.release_codename,
info.manifest.system['architecture']])
info.packages.add(kernel_package)

View file

@@ -3,21 +3,22 @@
"title": "KVM manifest",
"type": "object",
"properties": {
"bootstrapper": {
"system": {
"type": "object",
"properties": {
"virtio": {
"type": "array",
"items": {
"type": "string"
"type": "string",
"enum": ["virtio",
"virtio_pci",
"virtio_balloon",
"virtio_blk",
"virtio_net",
"virtio_ring"]
},
"minItems": 1
}
}
},
"system": {
"type": "object",
"properties": {
},
"bootloader": {
"type": "string",
"enum": ["grub", "extlinux"]

View file

@@ -12,5 +12,5 @@ class VirtIO(Task):
modules = os.path.join(info.root, '/etc/initramfs-tools/modules')
with open(modules, "a") as modules_file:
modules_file.write("\n")
for module in info.manifest.bootstrapper.get('virtio', []):
modules_file.write(module+"\n")
for module in info.manifest.system.get('virtio', []):
modules_file.write(module + "\n")

View file

@@ -29,8 +29,8 @@ class AddGuestAdditionsPackages(Task):
info.packages.add('dkms')
from common.tools import log_check_call
[kernel_version] = log_check_call(['/usr/sbin/chroot', info.root,
'/bin/uname', '-r'])
[kernel_version] = log_check_call(['chroot', info.root,
'uname', '-r'])
kernel_headers_pkg = 'linux-headers-{version}'.format(version=kernel_version)
info.packages.add(kernel_headers_pkg)
@@ -52,7 +52,7 @@ class InstallGuestAdditions(Task):
install_script = os.path.join('/', mount_dir, 'VBoxLinuxAdditions.run')
from common.tools import log_call
status, out, err = log_call(['/usr/sbin/chroot', info.root,
status, out, err = log_call(['chroot', info.root,
install_script, '--nox11'])
# Install will exit with $?=1 because X11 isn't installed
if status != 1: