Everything in base/ is now commented.

This commit is contained in:
Anders Ingemann 2014-03-23 16:04:03 +01:00
parent ca13d66b16
commit da4b85c0c7
27 changed files with 757 additions and 5 deletions

View file

@ -5,6 +5,13 @@ from main import main
def validate_manifest(data, validator, error):
"""Validates the manifest using the base manifest
Args:
data (dict): The data of the manifest
validator (function): The function that validates the manifest given the data and a path
error (function): The function that raises an error when the validation fails
"""
import os.path
schema_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'manifest-schema.json'))
validator(data, schema_path)
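
As an illustration of this signature, a hypothetical plugin-side validate_manifest that uses the same validator/error callbacks (the schema filename, plugin name and 'size' key are made up):

def validate_manifest(data, validator, error):
    import os.path
    schema_path = os.path.join(os.path.dirname(__file__), 'my-plugin-schema.json')
    validator(data, schema_path)
    # A custom check beyond what json-schema covers; the json_path pinpoints the offending value
    if data['plugins']['my_plugin'].get('size', 0) <= 0:
        error('size must be larger than zero', ['plugins', 'my_plugin', 'size'])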

View file

@ -1,26 +1,46 @@
class BootstrapInformation(object):
"""The BootstrapInformation class holds all information about the bootstrapping process.
The nature of the attributes of this class is rather diverse.
Tasks may set their own attributes on this class for later retrieval by another task.
Information that becomes invalid (e.g. a path to a file that has been deleted) must be removed.
"""
def __init__(self, manifest=None, debug=False):
"""Instantiates a new bootstrap info object.
Args:
manifest (Manifest): The manifest
debug (bool): Whether debugging is turned on
"""
# Set the manifest attribute.
self.manifest = manifest
self.debug = debug
# Create a run_id. This id may be used to uniquely identify the current bootstrapping process
import random
self.run_id = '{id:08x}'.format(id=random.randrange(16 ** 8))
# Define the path to our workspace
import os.path
self.workspace = os.path.join(manifest.bootstrapper['workspace'], self.run_id)
# Load all the volume information
from fs import load_volume
self.volume = load_volume(self.manifest.volume, manifest.system['bootloader'])
# The default apt mirror
self.apt_mirror = self.manifest.packages.get('mirror', 'http://http.debian.net/debian')
# Normalize the release codenames so that tasks may query for release codenames rather than
# 'stable', 'unstable' etc. This is useful when handling cases that are specific to a release.
release_codenames_path = os.path.join(os.path.dirname(__file__), 'release-codenames.json')
from common.tools import config_get
self.release_codename = config_get(release_codenames_path, [self.manifest.system['release']])
class DictClass(dict):
"""Tiny extension of dict to allow setting and getting keys via attributes
"""
def __getattr__(self, name):
return self[name]
@ -28,18 +48,29 @@ class BootstrapInformation(object):
self[name] = value
def set_manifest_vars(obj, data):
"""Runs through the manifest and creates DictClasses for every key
Args:
obj (dict): dictionary to set the values on
data (dict): dictionary of values to set on the obj
"""
for key, value in data.iteritems():
if isinstance(value, dict):
obj[key] = DictClass()
set_manifest_vars(obj[key], value)
continue
# Lists are not supported
if not isinstance(value, list):
obj[key] = value
# manifest_vars is a dictionary of all the manifest values.
# With it, users can cross-reference values in the manifest so that they do not need to be written twice
self.manifest_vars = {}
self.manifest_vars['apt_mirror'] = self.apt_mirror
set_manifest_vars(self.manifest_vars, self.manifest.data)
# Populate the manifest_vars with datetime information
# and map the datetime variables directly to the dictionary
from datetime import datetime
now = datetime.now()
time_vars = ['%a', '%A', '%b', '%B', '%c', '%d', '%f', '%H',
@ -48,13 +79,23 @@ class BootstrapInformation(object):
for key in time_vars:
self.manifest_vars[key] = now.strftime(key)
# Keep a list of apt sources,
# so that tasks may add to that list without having to fiddle with apt source list files.
from pkg.sourceslist import SourceLists
self.source_lists = SourceLists(self.manifest_vars)
# Keep a list of packages that should be installed, tasks can add and remove things from this list
from pkg.packagelist import PackageList
self.packages = PackageList(self.manifest_vars, self.source_lists)
# These sets should rarely be used; they specify which packages the debootstrap invocation
# should be called with.
self.include_packages = set()
self.exclude_packages = set()
# Dictionary to specify which commands are required on the host.
# The keys are commands, while the values are either package names or urls
# that hint at how a command may be made available.
self.host_dependencies = {}
# Lists of startup scripts that should be installed and disabled
self.initd = {'install': {}, 'disable': []}
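
For context, a minimal standalone sketch (not part of the commit) of what the manifest_vars cross-referencing described above looks like in practice; the nested keys and values are made up:

class DictClass(dict):
    # Mirrors the class above: keys double as attributes
    def __getattr__(self, name):
        return self[name]

def set_manifest_vars(obj, data):
    # Recursively copy the manifest into DictClass instances (lists are skipped)
    for key, value in data.items():
        if isinstance(value, dict):
            obj[key] = DictClass()
            set_manifest_vars(obj[key], value)
            continue
        if not isinstance(value, list):
            obj[key] = value

manifest_vars = {'apt_mirror': 'http://http.debian.net/debian'}
set_manifest_vars(manifest_vars, {'system': {'release': 'wheezy'}})
# Tasks can now cross-reference manifest values inside strings:
line = 'deb {apt_mirror} {system.release} main'.format(**manifest_vars)
# line == 'deb http://http.debian.net/debian wheezy main'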

View file

@ -1,10 +1,19 @@
def load_volume(data, bootloader):
"""Instantiates a volume that corresponds to the data in the manifest
Args:
data (dict): The 'volume' section from the manifest
bootloader (str): Name of the bootloader the system will boot with
Returns:
Volume. The volume that represents all information pertaining to the volume we bootstrap on
"""
from common.fs.loopbackvolume import LoopbackVolume
from providers.ec2.ebsvolume import EBSVolume
from common.fs.virtualdiskimage import VirtualDiskImage
from common.fs.virtualmachinedisk import VirtualMachineDisk
# Create a mapping between valid partition maps in the manifest and their corresponding classes
from partitionmaps.gpt import GPTPartitionMap
from partitionmaps.msdos import MSDOSPartitionMap
from partitionmaps.none import NoPartitions
@ -12,11 +21,14 @@ def load_volume(data, bootloader):
'gpt': GPTPartitionMap,
'msdos': MSDOSPartitionMap,
}
# Instantiate the partition map
partition_map = partition_maps.get(data['partitions']['type'])(data['partitions'], bootloader)
# Create a mapping between valid volume backings in the manifest and their corresponding classes
volume_backings = {'raw': LoopbackVolume,
's3': LoopbackVolume,
'vdi': VirtualDiskImage,
'vmdk': VirtualMachineDisk,
'ebs': EBSVolume
}
# Create the volume with the partition map as an argument
return volume_backings.get(data['backing'])(partition_map)
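
To make the two lookups above concrete, here is a hypothetical 'volume' manifest section and the classes load_volume would pick for it (only the keys the code accesses are shown; the authoritative definition is manifest-schema.json):

volume_section = {
    'backing': 'raw',  # selects LoopbackVolume
    'partitions': {
        'type': 'msdos',  # selects MSDOSPartitionMap
        'root': {'size': '4GiB', 'filesystem': 'ext4'},
    },
}
# load_volume(volume_section, 'grub') then amounts to:
#   LoopbackVolume(MSDOSPartitionMap(volume_section['partitions'], 'grub'))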

View file

@ -1,8 +1,12 @@
class VolumeError(Exception):
"""Raised when an error occurs while interacting with the volume
"""
pass
class PartitionError(Exception):
"""Raised when an error occurs while interacting with the partitions on the volume
"""
pass

View file

@ -6,25 +6,50 @@ from ..exceptions import PartitionError
class AbstractPartitionMap(FSMProxy):
"""Abstract representation of a partiton map
This class is a finite state machine and represents the state of the real partition map
"""
__metaclass__ = ABCMeta
# States the partition map can be in
events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'},
{'name': 'map', 'src': 'unmapped', 'dst': 'mapped'},
{'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'},
]
def __init__(self, bootloader):
"""
Args:
bootloader (str): Name of the bootloader we will use for bootstrapping
"""
# Create the configuration for the state machine
cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': {}}
super(AbstractPartitionMap, self).__init__(cfg)
def is_blocking(self):
"""Returns whether the partition map is blocking volume detach operations
Returns:
bool.
"""
return self.fsm.current == 'mapped'
def get_total_size(self):
"""Returns the total size the partitions occupy
Returns:
Bytes. The size of all the partitions
"""
# We just need the endpoint of the last partition
return self.partitions[-1].get_end()
def create(self, volume):
"""Creates the partition map
Args:
volume (Volume): The volume to create the partition map on
"""
self.fsm.create(volume=volume)
@abstractmethod
@ -32,11 +57,21 @@ class AbstractPartitionMap(FSMProxy):
pass
def map(self, volume):
"""Maps the partition map to device nodes
Args:
volume (Volume): The volume the partition map resides on
"""
self.fsm.map(volume=volume)
def _before_map(self, event):
"""
Raises:
PartitionError
"""
volume = event.volume
try:
# Ask kpartx how the partitions will be mapped before actually attaching them.
mappings = log_check_call(['kpartx', '-l', volume.device_path])
import re
regexp = re.compile('^(?P<name>.+[^\d](?P<p_idx>\d+)) : '
@ -45,6 +80,7 @@ class AbstractPartitionMap(FSMProxy):
.format(device_path=volume.device_path))
log_check_call(['kpartx', '-a', volume.device_path])
import os.path
# Run through the kpartx output and map the paths to the partitions
for mapping in mappings:
match = regexp.match(mapping)
if match is None:
@ -53,11 +89,13 @@ class AbstractPartitionMap(FSMProxy):
p_idx = int(match.group('p_idx')) - 1
self.partitions[p_idx].map(partition_path)
# Check if any partition was not mapped
for idx, partition in enumerate(self.partitions):
if partition.fsm.current not in ['mapped', 'formatted']:
raise PartitionError('kpartx did not map partition #{idx}'.format(idx=idx + 1))
except PartitionError as e:
# Revert any mapping and reraise the error
for partition in self.partitions:
if partition.fsm.can('unmap'):
partition.unmap()
@ -65,14 +103,26 @@ class AbstractPartitionMap(FSMProxy):
raise e
def unmap(self, volume):
"""Unmaps the partition
Args:
volume (Volume): The volume to unmap the partition map from
"""
self.fsm.unmap(volume=volume)
def _before_unmap(self, event):
"""
Raises:
PartitionError
"""
volume = event.volume
# Run through all partitions before unmapping and make sure they can all be unmapped
for partition in self.partitions:
if partition.fsm.cannot('unmap'):
msg = 'The partition {partition} prevents the unmap procedure'.format(partition=partition)
raise PartitionError(msg)
# Actually unmap the partitions
log_check_call(['kpartx', '-d', volume.device_path])
# Call unmap on all partitions
for partition in self.partitions:
partition.unmap()
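
As an illustration of the mapping step above, a standalone sketch (not from the commit) of how a kpartx -l style line is turned into a device path and a 0-based partition index; the sample output line and the simplified pattern are assumptions:

import re
import os.path

sample = 'loop0p2 : 0 997376 /dev/loop0 2048'  # assumed kpartx -l output format
match = re.match(r'^(?P<name>.+[^\d](?P<p_idx>\d+)) : ', sample)
partition_path = os.path.join('/dev/mapper', match.group('name'))
p_idx = int(match.group('p_idx')) - 1  # kpartx counts from 1, self.partitions from 0
# partition_path == '/dev/mapper/loop0p2', p_idx == 1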

View file

@ -5,23 +5,38 @@ from common.tools import log_check_call
class GPTPartitionMap(AbstractPartitionMap):
"""Represents a GPT partition map
"""
def __init__(self, data, bootloader):
"""
Args:
data (dict): volume.partitions part of the manifest
bootloader (str): Name of the bootloader we will use for bootstrapping
"""
from common.bytes import Bytes
# List of partitions
self.partitions = []
# Returns the last partition unless there is none
def last_partition():
return self.partitions[-1] if len(self.partitions) > 0 else None
# GPT offset
gpt_offset = Bytes('17KiB')
# If we are using the grub bootloader we need to create an unformatted partition
# at the beginning of the map. Its size is 1007KiB, which we will steal from the
# next partition.
if bootloader == 'grub':
from ..partitions.unformatted import UnformattedPartition
self.grub_boot = UnformattedPartition(Bytes('1007KiB'), last_partition())
self.grub_boot.offset = gpt_offset
# Mark the partition as a bios_grub partition
self.grub_boot.flags.append('bios_grub')
self.partitions.append(self.grub_boot)
# The boot and swap partitions are optional
if 'boot' in data:
self.boot = GPTPartition(Bytes(data['boot']['size']),
data['boot']['filesystem'], data['boot'].get('format_command', None),
@ -35,6 +50,8 @@ class GPTPartitionMap(AbstractPartitionMap):
'root', last_partition())
self.partitions.append(self.root)
# Depending on whether we have a grub boot partition
# we will need to set the offset accordingly.
if hasattr(self, 'grub_boot'):
self.partitions[1].size -= gpt_offset
self.partitions[1].size -= self.grub_boot.size
@ -45,8 +62,13 @@ class GPTPartitionMap(AbstractPartitionMap):
super(GPTPartitionMap, self).__init__(bootloader)
def _before_create(self, event):
"""Creates the partition map
"""
volume = event.volume
# Disk alignment still plays a role in virtualized environments,
# but I honestly have no clue as to what best practice is here, so we choose 'none'
log_check_call(['parted', '--script', '--align', 'none', volume.device_path,
'--', 'mklabel', 'gpt'])
# Create the partitions
for partition in self.partitions:
partition.create(volume)

View file

@ -5,14 +5,25 @@ from common.tools import log_check_call
class MSDOSPartitionMap(AbstractPartitionMap):
"""Represents a MS-DOS partition map
Sometimes also called MBR (but that confuses the hell out of me, so ms-dos it is)
"""
def __init__(self, data, bootloader):
"""
Args:
data (dict): volume.partitions part of the manifest
bootloader (str): Name of the bootloader we will use for bootstrapping
"""
from common.bytes import Bytes
# List of partitions
self.partitions = []
# Returns the last partition unless there is none
def last_partition():
return self.partitions[-1] if len(self.partitions) > 0 else None
# The boot and swap partitions are optional
if 'boot' in data:
self.boot = MSDOSPartition(Bytes(data['boot']['size']),
data['boot']['filesystem'], data['boot'].get('format_command', None),
@ -26,8 +37,11 @@ class MSDOSPartitionMap(AbstractPartitionMap):
last_partition())
self.partitions.append(self.root)
# Mark boot as the boot partition, or root, if boot does not exist
getattr(self, 'boot', self.root).flags.append('boot')
# If we are using the grub bootloader, we will need to create a 2 MiB offset at the beginning
# of the partition map and steal it from the first partition
if bootloader == 'grub':
self.partitions[0].offset = Bytes('2MiB')
self.partitions[0].size -= self.partitions[0].offset
@ -36,7 +50,10 @@ class MSDOSPartitionMap(AbstractPartitionMap):
def _before_create(self, event):
volume = event.volume
# Disk alignment still plays a role in virtualized environments,
# but I honestly have no clue as to what best practice is here, so we choose 'none'
log_check_call(['parted', '--script', '--align', 'none', volume.device_path,
'--', 'mklabel', 'msdos'])
# Create the partitions
for partition in self.partitions:
partition.create(volume)

View file

@ -2,15 +2,35 @@ from ..partitions.single import SinglePartition
class NoPartitions(object):
"""Represents a virtual 'NoPartitions' partitionmap.
This virtual partition map exists because it is easier for tasks to
simply always deal with partition maps and then let the base abstract that away.
"""
def __init__(self, data, bootloader):
"""
Args:
data (dict): volume.partitions part of the manifest
bootloader (str): Name of the bootloader we will use for bootstrapping
"""
from common.bytes import Bytes
# In the NoPartitions partitions map we only have a single 'partition'
self.root = SinglePartition(Bytes(data['root']['size']),
data['root']['filesystem'], data['root'].get('format_command', None))
self.partitions = [self.root]
def is_blocking(self):
"""Returns whether the partition map is blocking volume detach operations
Returns:
bool.
"""
return self.root.fsm.current == 'mounted'
def get_total_size(self):
"""Returns the total size the partitions occupy
Returns:
Bytes. The size of all the partitions
"""
return self.root.get_end()

View file

@ -6,9 +6,13 @@ from common.fsm_proxy import FSMProxy
class AbstractPartition(FSMProxy):
"""Abstract representation of a partiton
This class is a finite state machine and represents the state of the real partition
"""
__metaclass__ = ABCMeta
# Our states
events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'created'},
{'name': 'format', 'src': 'created', 'dst': 'formatted'},
{'name': 'mount', 'src': 'formatted', 'dst': 'mounted'},
@ -16,13 +20,26 @@ class AbstractPartition(FSMProxy):
]
class Mount(object):
"""Represents a mount into the partition
"""
def __init__(self, source, destination, opts):
"""
Args:
source (str,AbstractPartition): The path from where we mount or a partition
destination (str): The path of the mountpoint
opts (list): List of options to pass to the mount command
"""
self.source = source
self.destination = destination
self.opts = opts
def mount(self, prefix):
"""Performs the mount operation or forwards it to another partition
Args:
prefix (str): Path prefix of the mountpoint
"""
mount_dir = os.path.join(prefix, self.destination)
# If the source is another partition, we tell that partition to mount itself
if isinstance(self.source, AbstractPartition):
self.source.mount(destination=mount_dir)
else:
@ -30,6 +47,9 @@ class AbstractPartition(FSMProxy):
self.mount_dir = mount_dir
def unmount(self):
"""Performs the unmount operation or asks the partition to unmount itself
"""
# If it's a partition, it can unmount itself
if isinstance(self.source, AbstractPartition):
self.source.unmount()
else:
@ -37,16 +57,30 @@ class AbstractPartition(FSMProxy):
del self.mount_dir
def __init__(self, size, filesystem, format_command):
"""
Args:
size (Bytes): Size of the partition
filesystem (str): Filesystem the partition should be formatted with
format_command (list): Optional format command, valid variables are fs, device_path and size
"""
self.size = size
self.filesystem = filesystem
self.format_command = format_command
# Path to the partition
self.device_path = None
# Dictionary with mount points as keys and Mount objects as values
self.mounts = {}
# Create the configuration for our state machine
cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': {}}
super(AbstractPartition, self).__init__(cfg)
def get_uuid(self):
"""Gets the UUID of the partition
Returns:
str. The UUID of the partition
"""
[uuid] = log_check_call(['blkid', '-s', 'UUID', '-o', 'value', self.device_path])
return uuid
@ -55,9 +89,17 @@ class AbstractPartition(FSMProxy):
pass
def get_end(self):
"""Gets the end of the partition
Returns:
Bytes. The end of the partition
"""
return self.get_start() + self.size
def _before_format(self, e):
"""Formats the partition
"""
# If there is no explicit format_command defined, we simply call mkfs.fstype
if self.format_command is None:
format_command = ['mkfs.{fs}', '{device_path}']
else:
@ -67,29 +109,57 @@ class AbstractPartition(FSMProxy):
'size': self.size,
}
command = map(lambda part: part.format(**variables), format_command)
# Format the partition
log_check_call(command)
def _before_mount(self, e):
"""Mount the partition
"""
log_check_call(['mount', '--types', self.filesystem, self.device_path, e.destination])
self.mount_dir = e.destination
def _after_mount(self, e):
"""Mount any mounts associated with this partition
"""
# Make sure we mount in ascending order of mountpoint path length
# This ensures that we don't mount /dev/pts before we mount /dev
for destination in sorted(self.mounts.iterkeys(), key=len):
self.mounts[destination].mount(self.mount_dir)
def _before_unmount(self, e):
"""Unmount any mounts associated with this partition
"""
# Unmount the mounts in descending order of mountpoint path length
# You cannot unmount /dev before you have unmounted /dev/pts
for destination in sorted(self.mounts.iterkeys(), key=len, reverse=True):
self.mounts[destination].unmount()
log_check_call(['umount', self.mount_dir])
del self.mount_dir
def add_mount(self, source, destination, opts=[]):
"""Associate a mount with this partition
Automatically mounts it
Args:
source (str,AbstractPartition): The source of the mount
destination (str): The path to the mountpoint
opts (list): Any options that should be passed to the mount command
"""
# Create a new mount object, mount it if the partition is mounted and put it in the mounts dict
mount = self.Mount(source, destination, opts)
if self.fsm.current == 'mounted':
mount.mount(self.mount_dir)
self.mounts[destination] = mount
def remove_mount(self, destination):
"""Remove a mount from this partition
Automatically unmounts it
Args:
destination (str): The mountpoint path of the mount that should be removed
"""
# Unmount the mount if the partition is mounted and delete it from the mounts dict
# If the mount is already unmounted and the source is a partition, this will raise an exception
if self.fsm.current == 'mounted':
self.mounts[destination].unmount()
del self.mounts[destination]
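
A tiny standalone sketch of the length-based ordering used in _after_mount and _before_unmount above, with made-up mountpoints:

mounts = ['/dev', '/proc', '/dev/pts']
mount_order = sorted(mounts, key=len)                  # ['/dev', '/proc', '/dev/pts'], parents first
unmount_order = sorted(mounts, key=len, reverse=True)  # ['/dev/pts', '/proc', '/dev'], children first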

View file

@ -2,7 +2,11 @@ from abstract import AbstractPartition
class BasePartition(AbstractPartition):
"""Represents a partition that is actually a partition (and not a virtual one like 'Single')
"""
# Override the states of the abstract partition
# A real partition can be mapped and unmapped
events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'},
{'name': 'map', 'src': 'unmapped', 'dst': 'mapped'},
{'name': 'format', 'src': 'mapped', 'dst': 'formatted'},
@ -15,45 +19,87 @@ class BasePartition(AbstractPartition):
]
def __init__(self, size, filesystem, format_command, previous):
"""
Args:
size (Bytes): Size of the partition
filesystem (str): Filesystem the partition should be formatted with
format_command (list): Optional format command, valid variables are fs, device_path and size
previous (BasePartition): The partition that precedes this one
"""
# By saving the previous partition we form
# a linked list through which partitions can walk backwards to find the first partition.
self.previous = previous
from common.bytes import Bytes
# Initialize the offset to 0 bytes, may be changed later
self.offset = Bytes(0)
# List of flags that parted should put on the partition
self.flags = []
super(BasePartition, self).__init__(size, filesystem, format_command)
def create(self, volume):
"""Creates the partition
Args:
volume (Volume): The volume to create the partition on
"""
self.fsm.create(volume=volume)
def get_index(self):
"""Gets the index of this partition in the partition map
Returns:
int. The index of the partition in the partition map
"""
if self.previous is None:
# Partitions are 1 indexed
return 1
else:
# Recursive call to the previous partition, walking up the chain...
return self.previous.get_index() + 1
def get_start(self):
"""Gets the starting byte of this partition
Returns:
Bytes. The starting byte of this partition
"""
if self.previous is None:
# If there is no previous partition, this partition begins at the offset
return self.offset
else:
# Get the end of the previous partition and add the offset of this partition
return self.previous.get_end() + self.offset
def map(self, device_path):
"""Maps the partition to a device_path
Args:
device_path (str): The device path this partition should be mapped to
"""
self.fsm.map(device_path=device_path)
def _before_create(self, e):
"""Creates the partition
"""
from common.tools import log_check_call
# The create command is fairly simple; start and end are just Bytes objects coerced into strings
create_command = ('mkpart primary {start} {end}'
.format(start=str(self.get_start()),
end=str(self.get_end())))
# Create the partition
log_check_call(['parted', '--script', '--align', 'none', e.volume.device_path,
'--', create_command])
# Set any flags on the partition
for flag in self.flags:
log_check_call(['parted', '--script', e.volume.device_path,
'--', ('set {idx} {flag} on'
.format(idx=str(self.get_index()), flag=flag))])
def _before_map(self, e):
# Set the device path
self.device_path = e.device_path
def _before_unmap(self, e):
# When unmapped, the device_path information becomes invalid, so we delete it
self.device_path = None
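
A minimal stand-in (not the real classes) showing how the previous-partition chain above yields indices and start offsets; sizes are plain integers (MiB) instead of Bytes objects:

class Part(object):
    def __init__(self, size, previous, offset=0):
        self.size = size
        self.previous = previous
        self.offset = offset
    def get_index(self):
        return 1 if self.previous is None else self.previous.get_index() + 1
    def get_start(self):
        return self.offset if self.previous is None else self.previous.get_end() + self.offset
    def get_end(self):
        return self.get_start() + self.size

boot = Part(256, None, offset=2)  # e.g. a 2 MiB offset reserved for grub
root = Part(4096, boot)
assert (root.get_index(), root.get_start(), root.get_end()) == (2, 258, 4354)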

View file

@ -3,12 +3,23 @@ from base import BasePartition
class GPTPartition(BasePartition):
"""Represents a GPT partition
"""
def __init__(self, size, filesystem, format_command, name, previous):
"""
Args:
size (Bytes): Size of the partition
filesystem (str): Filesystem the partition should be formatted with
format_command (list): Optional format command, valid variables are fs, device_path and size
name (str): The name of the partition
previous (BasePartition): The partition that precedes this one
"""
self.name = name
super(GPTPartition, self).__init__(size, filesystem, format_command, previous)
def _before_create(self, e):
# Create the partition and then set the name of the partition afterwards
super(GPTPartition, self)._before_create(e)
# partition name only works for gpt, for msdos that becomes the part-type (primary, extended, logical)
name_command = ('name {idx} {name}'

View file

@ -3,8 +3,15 @@ from gpt import GPTPartition
class GPTSwapPartition(GPTPartition):
"""Represents a GPT swap partition
"""
def __init__(self, size, previous):
"""
Args:
size (Bytes): Size of the partition
previous (BasePartition): The partition that precedes this one
"""
super(GPTSwapPartition, self).__init__(size, 'swap', None, 'swap', previous)
def _before_format(self, e):

View file

@ -2,4 +2,6 @@ from base import BasePartition
class MSDOSPartition(BasePartition):
"""Represents an MS-DOS partition
"""
pass

View file

@ -3,8 +3,15 @@ from msdos import MSDOSPartition
class MSDOSSwapPartition(MSDOSPartition):
"""Represents a MS-DOS swap partition
"""
def __init__(self, size, previous):
"""
Args:
size (Bytes): Size of the partition
previous (BasePartition): The partition that precedes this one
"""
super(MSDOSSwapPartition, self).__init__(size, 'swap', None, previous)
def _before_format(self, e):

View file

@ -2,7 +2,15 @@ from abstract import AbstractPartition
class SinglePartition(AbstractPartition):
"""Represents a single virtual partition on an unpartitioned volume
"""
def get_start(self):
"""Gets the starting byte of this partition
Returns:
Bytes. The starting byte of this partition
"""
from common.bytes import Bytes
# On an unpartitioned volume there is no offset and no previous partition
return Bytes(0)

View file

@ -2,11 +2,20 @@ from base import BasePartition
class UnformattedPartition(BasePartition):
"""Represents an unformatted partition
It cannot be mounted
"""
# The states for our state machine. It can only be mapped, not mounted.
events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'},
{'name': 'map', 'src': 'unmapped', 'dst': 'mapped'},
{'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'},
]
def __init__(self, size, previous):
"""
Args:
size (Bytes): Size of the partition
previous (BasePartition): The partition that precedes this one
"""
super(UnformattedPartition, self).__init__(size, None, None, previous)

View file

@ -6,9 +6,13 @@ from partitionmaps.none import NoPartitions
class Volume(FSMProxy):
"""Represents an abstract volume.
This class is a finite state machine and represents the state of the real volume.
"""
__metaclass__ = ABCMeta
# States this volume can be in
events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'detached'},
{'name': 'attach', 'src': 'detached', 'dst': 'attached'},
{'name': 'link_dm_node', 'src': 'attached', 'dst': 'linked'},
@ -18,33 +22,76 @@ class Volume(FSMProxy):
]
def __init__(self, partition_map):
"""
Args:
partition_map (PartitionMap): The partition map for the volume
"""
# Path to the volume
self.device_path = None
self.real_device_path = None
# The partition map
self.partition_map = partition_map
# The size of the volume as reported by the partition map
self.size = self.partition_map.get_total_size()
# Before detaching, check that nothing would block the detachment
callbacks = {'onbeforedetach': self._check_blocking}
if isinstance(self.partition_map, NoPartitions):
# When the volume has no partitions, the virtual root partition path is equal to that of the volume
# Update that path whenever the path to the volume changes
def set_dev_path(e):
self.partition_map.root.device_path = self.device_path
callbacks['onafterattach'] = set_dev_path
callbacks['onlink_dm_node'] = set_dev_path
callbacks['onunlink_dm_node'] = set_dev_path
# Create the configuration for our finite state machine
cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': callbacks}
super(Volume, self).__init__(cfg)
def _after_create(self, e):
"""
Args:
e (_e_obj): Event object containing arguments to create()
"""
if isinstance(self.partition_map, NoPartitions):
# When the volume has no partitions, the virtual root partition
# is essentially created when the volume is created, forward that creation event.
self.partition_map.root.create()
def _check_blocking(self, e):
"""Checks whether the volume is blocked
Args:
e (_e_obj): Event object containing arguments to detach()
Raises:
VolumeError
"""
# Only the partition map can block the volume
if self.partition_map.is_blocking():
raise VolumeError('The partitionmap prevents the detach procedure')
def _before_link_dm_node(self, e):
"""Links the volume using the device mapper
This allows us to create a 'window' into the volume that acts like a volume in itself.
Mainly it is used to fool grub into thinking that it is working with a real volume,
rather than a loopback device or a network block device.
Args:
e (_e_obj): Event object containing arguments to link_dm_node()
Arguments are:
logical_start_sector (int): The sector the volume should start at in the new volume
start_sector (int): The offset at which the volume should begin to be mapped in the new volume
sectors (int): The number of sectors that should be mapped
Read more at: http://manpages.debian.org/cgi-bin/man.cgi?query=dmsetup&apropos=0&sektion=0&manpath=Debian+7.0+wheezy&format=html&locale=en
Raises:
VolumeError
"""
import os.path
from common.fs import get_partitions
# Fetch information from /proc/partitions
proc_partitions = get_partitions()
device_name = os.path.basename(self.device_path)
device_partition = proc_partitions[device_name]
@ -55,8 +102,10 @@ class Volume(FSMProxy):
# The offset at which the volume should begin to be mapped in the new volume
start_sector = getattr(e, 'start_sector', 0)
# The number of sectors that should be mapped
sectors = getattr(e, 'sectors', int(self.size / 512) - start_sector)
# This is the table we send to dmsetup, so that it may create a device mapping for us.
table = ('{log_start_sec} {sectors} linear {major}:{minor} {start_sec}'
.format(log_start_sec=logical_start_sector,
sectors=sectors,
@ -65,6 +114,7 @@ class Volume(FSMProxy):
start_sec=start_sector))
import string
import os.path
# Figure out the device letter and path
for letter in string.ascii_lowercase:
dev_name = 'vd' + letter
dev_path = os.path.join('/dev/mapper', dev_name)
@ -76,12 +126,21 @@ class Volume(FSMProxy):
if not hasattr(self, 'dm_node_name'):
raise VolumeError('Unable to find a free block device path for mounting the bootstrap volume')
# Create the device mapping
log_check_call(['dmsetup', 'create', self.dm_node_name], table)
# Update the device_path but remember the old one for when we unlink the volume again
self.unlinked_device_path = self.device_path
self.device_path = self.dm_node_path
def _before_unlink_dm_node(self, e):
"""Unlinks the device mapping
Args:
e (_e_obj): Event object containing arguments to unlink_dm_node()
"""
log_check_call(['dmsetup', 'remove', self.dm_node_name])
# Delete the no longer valid information
del self.dm_node_name
del self.dm_node_path
# Reset the device_path
self.device_path = self.unlinked_device_path
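
To make the dmsetup step above concrete, a small sketch with assumed numbers of the linear mapping table that is piped to dmsetup create:

volume_size = 1024 * 1024 * 1024  # assumed 1 GiB volume
major, minor = 7, 0               # assumed device numbers of the backing device, e.g. /dev/loop0
logical_start_sector = 0
start_sector = 0
sectors = volume_size // 512 - start_sector
table = ('{log_start_sec} {sectors} linear {major}:{minor} {start_sec}'
         .format(log_start_sec=logical_start_sector, sectors=sectors,
                 major=major, minor=minor, start_sec=start_sector))
# table == '0 2097152 linear 7:0 0'
# log_check_call(['dmsetup', 'create', dm_node_name], table) then creates /dev/mapper/<dm_node_name>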

View file

@ -1,7 +1,20 @@
"""This module holds functions and classes responsible for formatting the log output
both to a file and to the console.
.. module:: log
"""
import logging
def get_logfile_path(manifest_path):
"""Returns the path to a logfile given a manifest
The logfile name is constructed from the current timestamp and the basename of the manifest
Args:
manifest_path (str): The path to the manifest
Returns:
str. The path to the logfile
"""
import os.path
from datetime import datetime
@ -13,17 +26,31 @@ def get_logfile_path(manifest_path):
def setup_logger(logfile=None, debug=False):
"""Sets up the python logger to log to both a file and the console
Args:
logfile (str): Path to a logfile
debug (bool): Whether to log debug output to the console
"""
root = logging.getLogger()
# Make sure all logging statements are processed by our handlers; they decide the log level
root.setLevel(logging.NOTSET)
# Create a file log handler
file_handler = logging.FileHandler(logfile)
# Absolute timestamps are rather useless when bootstrapping; it's much more interesting
# to see how long things take, so we log in a relative format instead
file_handler.setFormatter(FileFormatter('[%(relativeCreated)s] %(levelname)s: %(message)s'))
# The file log handler always logs everything
file_handler.setLevel(logging.DEBUG)
root.addHandler(file_handler)
# Create a console log handler
import sys
console_handler = logging.StreamHandler(sys.stderr)
# We want to colorize the output to the console, so we add a formatter
console_handler.setFormatter(ConsoleFormatter())
# Set the log level depending on the debug argument
if debug:
console_handler.setLevel(logging.DEBUG)
else:
@ -32,6 +59,8 @@ def setup_logger(logfile=None, debug=False):
class ConsoleFormatter(logging.Formatter):
"""Formats log statements for the console
"""
level_colors = {logging.ERROR: 'red',
logging.WARNING: 'magenta',
logging.INFO: 'blue',
@ -39,11 +68,15 @@ class ConsoleFormatter(logging.Formatter):
def format(self, record):
if record.levelno in self.level_colors:
# Colorize the message if we have a color for it (DEBUG has no color)
from termcolor import colored
record.msg = colored(record.msg, self.level_colors[record.levelno])
return super(ConsoleFormatter, self).format(record)
class FileFormatter(logging.Formatter):
"""Formats log statements for output to file
Currently this is just a stub
"""
def format(self, record):
return super(FileFormatter, self).format(record)

View file

@ -1,20 +1,34 @@
"""Main module containing all the setup necessary for running the bootstrapping process
.. module:: main
"""
import logging
log = logging.getLogger(__name__)
def main():
"""Main function for invoking the bootstrap process
Raises:
Exception
"""
# Get the commandline arguments
import os
args = get_args()
# Require root privileges, except when doing a dry-run where they aren't needed
if os.geteuid() != 0 and not args.dry_run:
raise Exception('This program requires root privileges.')
# Setup logging
import log
logfile = log.get_logfile_path(args.manifest)
log.setup_logger(logfile=logfile, debug=args.debug)
# Everything has been set up, begin the bootstrapping process
run(args)
def get_args():
"""Creates an argument parser and returns the arguments it has parsed
"""
from argparse import ArgumentParser
parser = ArgumentParser(description='Bootstrap Debian for the cloud.')
parser.add_argument('--debug', action='store_true',
@ -28,31 +42,57 @@ def get_args():
def run(args):
"""Runs the bootstrapping process
Args:
args (dict): Dictionary of arguments from the commandline
"""
# Load the manifest
from manifest import Manifest
manifest = Manifest(args.manifest)
# Get the tasklist
from tasklist import TaskList
tasklist = TaskList()
# 'resolve_tasks' is the name of the function to call on the provider and plugins
tasklist.load('resolve_tasks', manifest)
# Create the bootstrap information object that'll be used throughout the bootstrapping process
from bootstrapinfo import BootstrapInformation
bootstrap_info = BootstrapInformation(manifest=manifest, debug=args.debug)
try:
# Run all the tasks the tasklist has gathered
tasklist.run(info=bootstrap_info, dry_run=args.dry_run)
# We're done! :-)
log.info('Successfully completed bootstrapping')
except (Exception, KeyboardInterrupt) as e:
# When an error occurs, log it and begin rollback
log.exception(e)
if args.pause_on_error:
# The --pause-on-error option is useful when the user wants to inspect the volume before rollback
raw_input('Press Enter to commence rollback')
log.error('Rolling back')
# Create a new tasklist to gather the necessary tasks for rollback
rollback_tasklist = TaskList()
# Create a useful little function for the provider and plugins to use
# when figuring out which tasks should be added to the rollback list.
def counter_task(task, counter):
"""counter_task() adds the second argument to the rollback tasklist
if the first argument is present in the list of completed tasks
Args:
task (Task): The task to look for in the completed tasks list
counter (Task): The task to add to the rollback tasklist
"""
if task in tasklist.tasks_completed and counter not in tasklist.tasks_completed:
rollback_tasklist.tasks.add(counter)
# Ask the provider and plugins for tasks they'd like to add to the rollback tasklist
# Any additional arguments beyond the first two are passed directly to the provider and plugins
rollback_tasklist.load('resolve_rollback_tasks', manifest, counter_task)
# Run the rollback tasklist
rollback_tasklist.run(info=bootstrap_info, dry_run=args.dry_run)
log.info('Successfully completed rollback')
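
A tiny runnable sketch of the counter_task semantics above, using strings as stand-ins for the Task classes:

tasks_completed = ['CreateVolume', 'AttachVolume']  # pretend these forward tasks ran
rollback_tasks = []

def counter_task(task, counter):
    # Mirrors the helper defined in run(): only roll back what actually happened
    if task in tasks_completed and counter not in tasks_completed:
        rollback_tasks.append(counter)

counter_task('CreateVolume', 'DeleteVolume')      # added, because CreateVolume completed
counter_task('MapPartitions', 'UnmapPartitions')  # skipped, MapPartitions never ran
assert rollback_tasks == ['DeleteVolume']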

View file

@ -1,22 +1,48 @@
"""The Manifest module contains the manifest that providers and plugins use
to determine which tasks should be added to the tasklist, what arguments various
invocations should have, etc.
.. module:: manifest
"""
from common.tools import load_json
import logging
log = logging.getLogger(__name__)
class Manifest(object):
"""This class holds all the information that providers and plugins need
to perform the bootstrapping process. All actions that are taken originate from
here. The manifest shall not be modified after it has been loaded.
Currently, immutability is not enforced and it would require a fair amount of code
to enforce it; instead we just rely on tasks behaving properly.
"""
def __init__(self, path):
"""Initializer: Given a path we load, validate and parse the manifest.
Args:
path (str): The path to the manifest
"""
self.path = path
self.load()
self.validate()
self.parse()
def load(self):
"""Loads the manifest.
This function not only reads the manifest but also loads the specified provider and plugins.
Once they are loaded, the initialize() function is called on each of them (if it exists).
The provider must have an initialize function.
"""
# Load the manifest JSON using the loader in common.tools
# It strips comments (which are invalid in strict json) before loading the data.
self.data = load_json(self.path)
# Get the provider name from the manifest and load the corresponding module
provider_modname = 'providers.{provider}'.format(provider=self.data['provider'])
log.debug('Loading provider `{modname}\''.format(modname=provider_modname))
# Create a modules dict that contains the loaded provider and plugins
self.modules = {'provider': __import__(provider_modname, fromlist=['providers']),
'plugins': [],
}
# Run through all the plugins mentioned in the manifest and load them
if 'plugins' in self.data:
for plugin_name, plugin_data in self.data['plugins'].iteritems():
modname = 'plugins.{plugin}'.format(plugin=plugin_name)
@ -24,37 +50,62 @@ class Manifest(object):
plugin = __import__(modname, fromlist=['plugins'])
self.modules['plugins'].append(plugin)
# Run the initialize function on the provider and plugins
self.modules['provider'].initialize()
for module in self.modules['plugins']:
# Plugins are not required to have an initialize function
init = getattr(module, 'initialize', None)
if callable(init):
init()
def validate(self):
"""Validates the manifest using the base, provider and plugin validation functions.
Plugins are not required to have a validate_manifest function
"""
from . import validate_manifest
# Validate the manifest with the base validation function in __init__
validate_manifest(self.data, self.schema_validator, self.validation_error)
# Run the provider validation
self.modules['provider'].validate_manifest(self.data, self.schema_validator, self.validation_error)
# Run the validation function for any plugin that has it
for plugin in self.modules['plugins']:
validate = getattr(plugin, 'validate_manifest', None)
if callable(validate):
validate(self.data, self.schema_validator, self.validation_error)
def parse(self):
"""Parses the manifest.
Well... "parsing" is a big word.
The function really just sets up some convenient attributes so that tasks
don't have to access information with info.manifest.data['section']
but can do it with info.manifest.section.
"""
self.provider = self.data['provider']
self.bootstrapper = self.data['bootstrapper']
self.image = self.data['image']
self.volume = self.data['volume']
self.system = self.data['system']
# The packages and plugins sections are not required
self.packages = self.data['packages'] if 'packages' in self.data else {}
self.plugins = self.data['plugins'] if 'plugins' in self.data else {}
def load_json(self, path):
"""Loads JSON. Unused and will be removed.
Use common.tools.load_json instead
"""
import json
from minify_json import json_minify
with open(path) as stream:
return json.loads(json_minify(stream.read(), False))
def schema_validator(self, data, schema_path):
"""This convenience function is passed around to all the validation functions
so that they may run a json-schema validation by giving it the data and a path to the schema.
Args:
data (dict): Data to validate (normally the manifest data)
schema_path (str): Path to the json-schema to use for validation
"""
import jsonschema
schema = load_json(schema_path)
try:
@ -63,5 +114,12 @@ class Manifest(object):
self.validation_error(e.message, e.path)
def validation_error(self, message, json_path=None):
"""This function is passed to all validation functions so that they may
raise a validation error because a custom validation of the manifest failed.
Args:
message (str): Message to user about the error
json_path (list): A path to the location in the manifest where the error occurred
"""
from common.exceptions import ManifestError
raise ManifestError(message, self.path, json_path)
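
For orientation, a hypothetical manifest skeleton shaped after the attributes parse() sets up (the concrete keys and values are made up; the authoritative definition lives in the json-schema files):

manifest_data = {
    'provider': 'ec2',                         # loaded as providers.ec2
    'bootstrapper': {'workspace': '/target'},
    'image': {'name': 'debian-wheezy-example'},
    'volume': {'backing': 'ebs',
               'partitions': {'type': 'none',
                              'root': {'size': '8GiB', 'filesystem': 'ext4'}}},
    'system': {'release': 'wheezy', 'bootloader': 'grub'},
    'packages': {},                            # optional
    'plugins': {},                             # optional
}
# After parse(), tasks read these as manifest.provider, manifest.system['release'], manifest.volume, etc.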

View file

@ -1,16 +1,33 @@
class Phase(object):
"""The Phase class represents a phase a task may be in.
It has no function other than to act as an anchor in the task graph.
All phases are instantiated in common.phases
"""
def __init__(self, name, description):
# The name of the phase
self.name = name
# The description of the phase (currently not used anywhere)
self.description = description
def pos(self):
"""Gets the position of the phase
Returns:
int. The positional index of the phase in relation to the other phases
"""
from common.phases import order
return next(i for i, phase in enumerate(order) if phase is self)
def __cmp__(self, other):
"""Compares the phase order in relation to the other phases
"""
return self.pos() - other.pos()
def __str__(self):
"""String representation of the phase, the name suffices
Returns:
string.
"""
return self.name

View file

@ -1,8 +1,12 @@
class PackageError(Exception):
"""Raised when an error occurrs while handling the packageslist
"""
pass
class SourceError(Exception):
"""Raised when an error occurs while handling the sourceslist
"""
pass

View file

@ -2,38 +2,84 @@ from exceptions import PackageError
class PackageList(object):
"""Represents a list of packages
"""
class Remote(object):
"""A remote package with an optional target
"""
def __init__(self, name, target):
"""
Args:
name (str): The name of the package
target (str): The name of the target release
"""
self.name = name
self.target = target
def __str__(self):
"""Converts the package into somehting that apt-get install can parse
Returns:
string.
"""
if self.target is None:
return self.name
else:
return '{name}/{target}'.format(name=self.name, target=self.target)
class Local(object):
"""A local package
"""
def __init__(self, path):
"""
Args:
path (str): The path to the local package
"""
self.path = path
def __str__(self):
"""
Returns:
string. The path to the local package
"""
return self.path
def __init__(self, manifest_vars, source_lists):
"""
Args:
manifest_vars (dict): The manifest variables
source_lists (SourceLists): The sourcelists for apt
"""
self.manifest_vars = manifest_vars
self.source_lists = source_lists
# The default_target is the release we are bootstrapping
self.default_target = '{system.release}'.format(**self.manifest_vars)
# The list of packages that should be installed; this is deliberately not a set.
# We want to preserve the order in which the packages were added so that local
# packages may be installed in the correct order.
self.install = []
# A function that filters the install list and only returns remote packages
self.remote = lambda: filter(lambda x: isinstance(x, self.Remote), self.install)
def add(self, name, target=None):
"""Adds a package to the install list
Args:
name (str): The name of the package to install, may contain manifest vars references
target (str): The name of the target release for the package, may contain manifest vars references
Raises:
PackageError
"""
name = name.format(**self.manifest_vars)
if target is not None:
target = target.format(**self.manifest_vars)
# Check if the package has already been added.
# If so, make sure it's the same target and raise a PackageError otherwise
package = next((pkg for pkg in self.remote() if pkg.name == name), None)
if package is not None:
# It's the same target if the target names match or one of the targets is None
# and the other is the default target.
same_target = package.target == target
same_target = same_target or package.target is None and target == self.default_target
same_target = same_target or package.target == self.default_target and target is None
@ -42,8 +88,10 @@ class PackageList(object):
'but with target release `{target}\' instead of `{add_target}\''
.format(name=name, target=package.target, add_target=target))
raise PackageError(msg)
# The package has already been added, skip the checks below
return
# Check if the target exists in the sources list, raise a PackageError if not
check_target = target
if check_target is None:
check_target = self.default_target
@ -51,8 +99,17 @@ class PackageList(object):
msg = ('The target release {target} was not found in the sources list').format(target=check_target)
raise PackageError(msg)
# Note that we maintain the target value even if it is None.
# This allows us to preserve the semantics of the default target when calling apt-get install
# Why? Try installing nfs-client/wheezy, you can't. It's a virtual package for which you cannot define
# a target release. Only `apt-get install nfs-client` works.
self.install.append(self.Remote(name, target))
def add_local(self, package_path):
"""Adds a local package to the installation list
Args:
package_path (str): Path to the local package, may contain manifest vars references
"""
package_path = package_path.format(**self.manifest_vars)
self.install.append(self.Local(package_path))
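
A short runnable sketch of the Remote naming behaviour described above (only the name/target formatting; the sources-list check is omitted):

class Remote(object):
    def __init__(self, name, target):
        self.name = name
        self.target = target
    def __str__(self):
        if self.target is None:
            return self.name
        return '{name}/{target}'.format(name=self.name, target=self.target)

# With no target the bare name is handed to apt-get install, which resolves it
# against the default release; virtual packages like nfs-client only work this way.
assert str(Remote('nfs-client', None)) == 'nfs-client'
assert str(Remote('linux-image-amd64', 'wheezy-backports')) == 'linux-image-amd64/wheezy-backports'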

View file

@ -1,12 +1,27 @@
class SourceLists(object):
"""Represents a list of sources lists for apt
"""
def __init__(self, manifest_vars):
"""
Args:
manifest_vars (dict): The manifest variables
"""
# A dictionary with the name of the file in sources.list.d as the key
# The values are lists of Source objects
self.sources = {}
# Save the manifest variables, we need them later on
self.manifest_vars = manifest_vars
def add(self, name, line):
"""Adds a source to the apt sources list
Args:
name (str): Name of the file in sources.list.d, may contain manifest vars references
line (str): The line for the source file, may contain manifest vars references
"""
name = name.format(**self.manifest_vars)
line = line.format(**self.manifest_vars)
if name not in self.sources:
@ -14,7 +29,16 @@ class SourceLists(object):
self.sources[name].append(Source(line))
def target_exists(self, target):
"""Checks whether the target exists in the sources list
Args:
target (str): Name of the target to check for, may contain manifest vars references
Returns:
bool. Whether the target exists
"""
target = target.format(**self.manifest_vars)
# Run through all the sources and return True if the target exists
for lines in self.sources.itervalues():
if target in (source.distribution for source in lines):
return True
@ -22,8 +46,20 @@ class SourceLists(object):
class Source(object):
"""Represents a single source line
"""
def __init__(self, line):
"""
Args:
line (str): An apt source line
Raises:
SourceError
"""
# Parse the source line and populate the class attributes with it
# The format is taken from `man sources.list`
# or: http://manpages.debian.org/cgi-bin/man.cgi?sektion=5&query=sources.list&apropos=0&manpath=sid&locale=en
import re
regexp = re.compile('^(?P<type>deb|deb-src)\s+'
'(\[\s*(?P<options>.+\S)?\s*\]\s+)?'
@ -45,6 +81,12 @@ class Source(object):
self.components = re.sub(' +', ' ', match['components']).split(' ')
def __str__(self):
"""Convert the object into a source line
This is pretty much the reverse of what we're doing in the initialization function.
Returns:
string.
"""
options = ''
if len(self.options) > 0:
options = ' [{options}]'.format(options=' '.join(self.options))
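
For reference, the kind of line Source parses, following the sources.list format referenced above (attribute names not visible in this hunk, such as the URI, are assumptions):

line = 'deb http://http.debian.net/debian wheezy main contrib non-free'
# type:         'deb'
# options:      []        (the optional [ ... ] block is absent here)
# uri:          'http://http.debian.net/debian'
# distribution: 'wheezy'  (this is what target_exists() compares against)
# components:   ['main', 'contrib', 'non-free']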

View file

@ -1,17 +1,37 @@
class Task(object):
"""The task class represents are task that can be run.
It is merely a wrapper for the run function and should never be instantiated.
"""
# The phase this task is located in.
phase = None
# List of tasks that should run before this task is run
predecessors = []
# List of tasks that should run after this task has run
successors = []
class __metaclass__(type):
"""Metaclass to control how the class is coerced into a string
"""
def __repr__(cls):
"""
Returns:
string.
"""
return '{module}.{task}'.format(module=cls.__module__, task=cls.__name__)
def __str__(cls):
"""
Returns:
string.
"""
return repr(cls)
@classmethod
def run(cls, info):
"""The run function, all work is done inside this function
Args:
info (BootstrapInformation): The bootstrap info object
"""
pass

View file

@ -1,35 +1,68 @@
"""The tasklist module contains the TaskList class.
.. module:: tasklist
"""
from common.exceptions import TaskListError
import logging
log = logging.getLogger(__name__)
class TaskList(object):
"""The tasklist class aggregates all tasks that should be run
and orders them according to their dependencies.
"""
def __init__(self):
self.tasks = set()
self.tasks_completed = []
def load(self, function, manifest, *args):
"""Calls 'function' on the provider and all plugins that have been loaded by the manifest.
Any additional arguments are passed directly to 'function'.
The function that is called shall accept the taskset as its first argument and the manifest
as its second argument.
Args:
function (str): Name of the function to call
manifest (Manifest): The manifest
*args: Additional arguments that should be passed to the function that is called
"""
# Call 'function' on the provider
getattr(manifest.modules['provider'], function)(self.tasks, manifest, *args)
for plugin in manifest.modules['plugins']:
# Plugins are not required to implement the function we call
fn = getattr(plugin, function, None)
if callable(fn):
fn(self.tasks, manifest, *args)
def run(self, info={}, dry_run=False):
"""Converts the taskgraph into a list and runs all tasks in that list
Args:
info (dict): The bootstrap information object
dry_run (bool): Whether to actually run the tasks or simply step through them
"""
# Create a list for us to run
task_list = self.create_list()
# Output the tasklist
log.debug('Tasklist:\n\t{list}'.format(list='\n\t'.join(map(repr, task_list))))
for task in task_list:
# Tasks are not required to have a description
if hasattr(task, 'description'):
log.info(task.description)
else:
# If there is no description, simply coerce the task into a string and print its name
log.info('Running {task}'.format(task=task))
if not dry_run:
# Run the task
task.run(info)
# Remember which tasks have been run for later use (e.g. when rolling back, because of an error)
self.tasks_completed.append(task)
def create_list(self):
"""Creates a list of all the tasks that should be run.
"""
from common.phases import order
# Get a hold of all tasks
tasks = self.get_all_tasks()
@ -52,9 +85,11 @@ class TaskList(object):
# Map the successors to the task
graph[task] = successors
# Use the strongly connected components algorithm to check for cycles in our task graph
components = self.strongly_connected_components(graph)
cycles_found = 0
for component in components:
# A single-node component is also strongly connected but hardly a cycle, so we filter those out
if len(component) > 1:
cycles_found += 1
log.debug('Cycle: {list}\n'.format(list=', '.join(map(repr, component))))
@ -72,6 +107,11 @@ class TaskList(object):
return sorted_tasks
def get_all_tasks(self):
"""Gets a list of all task classes in the package
Returns:
list. A list of all tasks in the package
"""
# Get a generator that returns all classes in the package
classes = self.get_all_classes('..')
@ -81,8 +121,18 @@ class TaskList(object):
return issubclass(obj, Task) and obj is not Task
return filter(is_task, classes) # Only return classes that are tasks
# Given a path, retrieve all the classes in it
def get_all_classes(self, path=None):
""" Given a path to a package, this function retrieves all the classes in it
Args:
path (str): Path to the package
Returns:
generator. A generator that yields classes
Raises:
Exception
"""
import pkgutil
import importlib
import inspect
@ -99,13 +149,28 @@ class TaskList(object):
yield obj
def check_ordering(self, task):
"""Checks the ordering of a task in relation to other tasks and their phases
This function checks for a subset of what the strongly connected components algorithm does,
but can deliver a more precise error message, namely that there is a conflict between
what a task has specified as its predecessors or successors and in which phase it is placed.
Args:
task (Task): The task to check the ordering for
Raises:
TaskListError
"""
for successor in task.successors:
# Run through all successors and check whether the phase of the task
# comes before the phase of a successor
if task.phase > successor.phase:
msg = ("The task {task} is specified as running before {other}, "
"but its phase '{phase}' lies after the phase '{other_phase}'"
.format(task=task, other=successor, phase=task.phase, other_phase=successor.phase))
raise TaskListError(msg)
for predecessor in task.predecessors:
# Run through all predecessors and check whether the phase of the task
# comes after the phase of a predecessor
if task.phase < predecessor.phase:
msg = ("The task {task} is specified as running after {other}, "
"but its phase '{phase}' lies before the phase '{other_phase}'"
@ -113,9 +178,15 @@ class TaskList(object):
raise TaskListError(msg)
def strongly_connected_components(self, graph):
# Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
# Find the strongly connected components in a graph using Tarjan's algorithm.
# graph should be a dictionary mapping node names to lists of successor nodes.
"""Find the strongly connected components in a graph using Tarjan's algorithm.
Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
Args:
graph (dict): mapping of tasks to lists of successor tasks
Returns:
list. List of tuples that are strongly connected components
"""
result = []
stack = []
@ -147,7 +218,15 @@ class TaskList(object):
return result
def topological_sort(self, graph):
# Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
"""Runs a topological sort on a graph
Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
Args:
graph (dict): mapping of tasks to lists of successor tasks
Returns:
list. A list of all tasks in the graph sorted according to their dependencies
"""
count = {}
for node in graph:
count[node] = 0
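
To illustrate what these graph helpers work on, a generic in-degree based topological sort over a toy successor graph; this is a sketch in the spirit of the referenced snippet, not a copy of the commit's exact code:

def toposort(graph):
    # graph maps each task to the list of its successors
    count = dict((node, 0) for node in graph)
    for node in graph:
        for successor in graph[node]:
            count[successor] += 1
    ready = [node for node in graph if count[node] == 0]
    result = []
    while ready:
        node = ready.pop()
        result.append(node)
        for successor in graph[node]:
            count[successor] -= 1
            if count[successor] == 0:
                ready.append(successor)
    return result

graph = {'CreateVolume': ['MapPartitions'],
         'MapPartitions': ['FormatPartitions'],
         'FormatPartitions': []}
assert toposort(graph) == ['CreateVolume', 'MapPartitions', 'FormatPartitions']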

View file

@ -99,3 +99,13 @@ class ApplyPuppetManifest(Task):
from common.tools import sed_i
hosts_path = os.path.join(info.root, 'etc/hosts')
sed_i(hosts_path, '127.0.0.1\s*{hostname}\n?'.format(hostname=hostname), '')
class EnableAgent(Task):
description = 'Enabling the puppet agent'
phase = phases.system_modification
@classmethod
def run(cls, info):
from common.tools import sed_i
puppet_defaults = os.path.join(info.root, 'etc/defaults/puppet')
sed_i(puppet_defaults, 'START=no', 'START=yes')