|
|
@@ -14,6 +14,9 @@ from docker.errors import APIError
|
|
|
from docker.errors import ImageNotFound
|
|
|
from docker.errors import NotFound
|
|
|
from docker.types import LogConfig
|
|
|
+from docker.types import Mount
|
|
|
+from docker.utils import version_gte
|
|
|
+from docker.utils import version_lt
|
|
|
from docker.utils.ports import build_port_bindings
|
|
|
from docker.utils.ports import split_port
|
|
|
from docker.utils.utils import convert_tmpfs_mounts
|
|
|
@@ -23,7 +26,9 @@ from . import const
|
|
|
from . import progress_stream
|
|
|
from .config import DOCKER_CONFIG_KEYS
|
|
|
from .config import merge_environment
|
|
|
+from .config import merge_labels
|
|
|
from .config.errors import DependencyError
|
|
|
+from .config.types import MountSpec
|
|
|
from .config.types import ServicePort
|
|
|
from .config.types import VolumeSpec
|
|
|
from .const import DEFAULT_TIMEOUT
|
|
|
@@ -76,6 +81,7 @@ HOST_CONFIG_KEYS = [
|
|
|
'mem_reservation',
|
|
|
'memswap_limit',
|
|
|
'mem_swappiness',
|
|
|
+ 'oom_kill_disable',
|
|
|
'oom_score_adj',
|
|
|
'pid',
|
|
|
'pids_limit',
|
|
|
@@ -378,11 +384,11 @@ class Service(object):
|
|
|
|
|
|
return has_diverged
|
|
|
|
|
|
- def _execute_convergence_create(self, scale, detached, start):
|
|
|
+ def _execute_convergence_create(self, scale, detached, start, project_services=None):
|
|
|
i = self._next_container_number()
|
|
|
|
|
|
def create_and_start(service, n):
|
|
|
- container = service.create_container(number=n)
|
|
|
+ container = service.create_container(number=n, quiet=True)
|
|
|
if not detached:
|
|
|
container.attach_log_stream()
|
|
|
if start:
|
|
|
@@ -390,10 +396,11 @@ class Service(object):
|
|
|
return container
|
|
|
|
|
|
containers, errors = parallel_execute(
|
|
|
- range(i, i + scale),
|
|
|
- lambda n: create_and_start(self, n),
|
|
|
- lambda n: self.get_container_name(n),
|
|
|
+ [ServiceName(self.project, self.name, index) for index in range(i, i + scale)],
|
|
|
+ lambda service_name: create_and_start(self, service_name.number),
|
|
|
+ lambda service_name: self.get_container_name(service_name.service, service_name.number),
|
|
|
"Creating",
|
|
|
+ parent_objects=project_services
|
|
|
)
|
|
|
for error in errors.values():
|
|
|
raise OperationFailedError(error)
|
|
|
@@ -432,7 +439,7 @@ class Service(object):
|
|
|
if start:
|
|
|
_, errors = parallel_execute(
|
|
|
containers,
|
|
|
- lambda c: self.start_container_if_stopped(c, attach_logs=not detached),
|
|
|
+ lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
|
|
|
lambda c: c.name,
|
|
|
"Starting",
|
|
|
)
|
|
|
@@ -459,7 +466,7 @@ class Service(object):
|
|
|
)
|
|
|
|
|
|
def execute_convergence_plan(self, plan, timeout=None, detached=False,
|
|
|
- start=True, scale_override=None, rescale=True):
|
|
|
+ start=True, scale_override=None, rescale=True, project_services=None):
|
|
|
(action, containers) = plan
|
|
|
scale = scale_override if scale_override is not None else self.scale_num
|
|
|
containers = sorted(containers, key=attrgetter('number'))
|
|
|
@@ -468,7 +475,7 @@ class Service(object):
|
|
|
|
|
|
if action == 'create':
|
|
|
return self._execute_convergence_create(
|
|
|
- scale, detached, start
|
|
|
+ scale, detached, start, project_services
|
|
|
)
|
|
|
|
|
|
# The create action needs always needs an initial scale, but otherwise,
|
|
|
@@ -510,7 +517,6 @@ class Service(object):
|
|
|
volumes can be copied to the new container, before the original
|
|
|
container is removed.
|
|
|
"""
|
|
|
- log.info("Recreating %s" % container.name)
|
|
|
|
|
|
container.stop(timeout=self.stop_timeout(timeout))
|
|
|
container.rename_to_tmp_name()
|
|
|
@@ -741,21 +747,26 @@ class Service(object):
|
|
|
container_options.update(override_options)
|
|
|
|
|
|
if not container_options.get('name'):
|
|
|
- container_options['name'] = self.get_container_name(number, one_off)
|
|
|
+ container_options['name'] = self.get_container_name(self.name, number, one_off)
|
|
|
|
|
|
container_options.setdefault('detach', True)
|
|
|
|
|
|
# If a qualified hostname was given, split it into an
|
|
|
# unqualified hostname and a domainname unless domainname
|
|
|
- # was also given explicitly. This matches the behavior of
|
|
|
- # the official Docker CLI in that scenario.
|
|
|
- if ('hostname' in container_options and
|
|
|
+ # was also given explicitly. This matches behavior
|
|
|
+ # until Docker Engine 1.11.0 - Docker API 1.23.
|
|
|
+ if (version_lt(self.client.api_version, '1.23') and
|
|
|
+ 'hostname' in container_options and
|
|
|
'domainname' not in container_options and
|
|
|
'.' in container_options['hostname']):
|
|
|
parts = container_options['hostname'].partition('.')
|
|
|
container_options['hostname'] = parts[0]
|
|
|
container_options['domainname'] = parts[2]
|
|
|
|
|
|
+ if (version_gte(self.client.api_version, '1.25') and
|
|
|
+ 'stop_grace_period' in self.options):
|
|
|
+ container_options['stop_timeout'] = self.stop_timeout(None)
|
|
|
+
|
|
|
if 'ports' in container_options or 'expose' in self.options:
|
|
|
container_options['ports'] = build_container_ports(
|
|
|
formatted_ports(container_options.get('ports', [])),
|
|
|
@@ -770,21 +781,38 @@ class Service(object):
|
|
|
self.options.get('environment'),
|
|
|
override_options.get('environment'))
|
|
|
|
|
|
+ container_options['labels'] = merge_labels(
|
|
|
+ self.options.get('labels'),
|
|
|
+ override_options.get('labels'))
|
|
|
+
|
|
|
+ container_volumes = []
|
|
|
+ container_mounts = []
|
|
|
+ if 'volumes' in container_options:
|
|
|
+ container_volumes = [
|
|
|
+ v for v in container_options.get('volumes') if isinstance(v, VolumeSpec)
|
|
|
+ ]
|
|
|
+ container_mounts = [v for v in container_options.get('volumes') if isinstance(v, MountSpec)]
|
|
|
+
|
|
|
binds, affinity = merge_volume_bindings(
|
|
|
- container_options.get('volumes') or [],
|
|
|
- self.options.get('tmpfs') or [],
|
|
|
- previous_container)
|
|
|
+ container_volumes, self.options.get('tmpfs') or [], previous_container,
|
|
|
+ container_mounts
|
|
|
+ )
|
|
|
override_options['binds'] = binds
|
|
|
container_options['environment'].update(affinity)
|
|
|
|
|
|
- container_options['volumes'] = dict(
|
|
|
- (v.internal, {}) for v in container_options.get('volumes') or {})
|
|
|
+ container_options['volumes'] = dict((v.internal, {}) for v in container_volumes or {})
|
|
|
+ override_options['mounts'] = [build_mount(v) for v in container_mounts] or None
|
|
|
|
|
|
secret_volumes = self.get_secret_volumes()
|
|
|
if secret_volumes:
|
|
|
- override_options['binds'].extend(v.repr() for v in secret_volumes)
|
|
|
- container_options['volumes'].update(
|
|
|
- (v.internal, {}) for v in secret_volumes)
|
|
|
+ if version_lt(self.client.api_version, '1.30'):
|
|
|
+ override_options['binds'].extend(v.legacy_repr() for v in secret_volumes)
|
|
|
+ container_options['volumes'].update(
|
|
|
+ (v.target, {}) for v in secret_volumes
|
|
|
+ )
|
|
|
+ else:
|
|
|
+ override_options['mounts'] = override_options.get('mounts') or []
|
|
|
+ override_options['mounts'].extend([build_mount(v) for v in secret_volumes])
|
|
|
|
|
|
container_options['image'] = self.image_name
|
|
|
|
|
|
@@ -857,6 +885,7 @@ class Service(object):
|
|
|
sysctls=options.get('sysctls'),
|
|
|
pids_limit=options.get('pids_limit'),
|
|
|
tmpfs=options.get('tmpfs'),
|
|
|
+ oom_kill_disable=options.get('oom_kill_disable'),
|
|
|
oom_score_adj=options.get('oom_score_adj'),
|
|
|
mem_swappiness=options.get('mem_swappiness'),
|
|
|
group_add=options.get('group_add'),
|
|
|
@@ -877,6 +906,7 @@ class Service(object):
|
|
|
device_read_iops=blkio_config.get('device_read_iops'),
|
|
|
device_write_bps=blkio_config.get('device_write_bps'),
|
|
|
device_write_iops=blkio_config.get('device_write_iops'),
|
|
|
+ mounts=options.get('mounts'),
|
|
|
)
|
|
|
|
|
|
def get_secret_volumes(self):
|
|
|
@@ -887,11 +917,11 @@ class Service(object):
|
|
|
elif not os.path.isabs(target):
|
|
|
target = '{}/{}'.format(const.SECRETS_PATH, target)
|
|
|
|
|
|
- return VolumeSpec(secret['file'], target, 'ro')
|
|
|
+ return MountSpec('bind', secret['file'], target, read_only=True)
|
|
|
|
|
|
return [build_spec(secret) for secret in self.secrets]
|
|
|
|
|
|
- def build(self, no_cache=False, pull=False, force_rm=False, build_args_override=None):
|
|
|
+ def build(self, no_cache=False, pull=False, force_rm=False, memory=None, build_args_override=None):
|
|
|
log.info('Building %s' % self.name)
|
|
|
|
|
|
build_opts = self.options.get('build', {})
|
|
|
@@ -921,6 +951,10 @@ class Service(object):
|
|
|
network_mode=build_opts.get('network', None),
|
|
|
target=build_opts.get('target', None),
|
|
|
shmsize=parse_bytes(build_opts.get('shm_size')) if build_opts.get('shm_size') else None,
|
|
|
+ extra_hosts=build_opts.get('extra_hosts', None),
|
|
|
+ container_limits={
|
|
|
+ 'memory': parse_bytes(memory) if memory else None
|
|
|
+ },
|
|
|
)
|
|
|
|
|
|
try:
|
|
|
@@ -960,12 +994,12 @@ class Service(object):
|
|
|
def custom_container_name(self):
|
|
|
return self.options.get('container_name')
|
|
|
|
|
|
- def get_container_name(self, number, one_off=False):
|
|
|
+ def get_container_name(self, service_name, number, one_off=False):
|
|
|
if self.custom_container_name and not one_off:
|
|
|
return self.custom_container_name
|
|
|
|
|
|
container_name = build_container_name(
|
|
|
- self.project, self.name, number, one_off,
|
|
|
+ self.project, service_name, number, one_off,
|
|
|
)
|
|
|
ext_links_origins = [l.split(':')[0] for l in self.options.get('external_links', [])]
|
|
|
if container_name in ext_links_origins:
|
|
|
@@ -1220,32 +1254,40 @@ def parse_repository_tag(repo_path):
|
|
|
# Volumes
|
|
|
|
|
|
|
|
|
-def merge_volume_bindings(volumes, tmpfs, previous_container):
|
|
|
- """Return a list of volume bindings for a container. Container data volumes
|
|
|
- are replaced by those from the previous container.
|
|
|
+def merge_volume_bindings(volumes, tmpfs, previous_container, mounts):
|
|
|
+ """
|
|
|
+ Return a list of volume bindings for a container. Container data volumes
|
|
|
+ are replaced by those from the previous container.
|
|
|
+ Anonymous mounts are updated in place.
|
|
|
"""
|
|
|
affinity = {}
|
|
|
|
|
|
volume_bindings = dict(
|
|
|
build_volume_binding(volume)
|
|
|
for volume in volumes
|
|
|
- if volume.external)
|
|
|
+ if volume.external
|
|
|
+ )
|
|
|
|
|
|
if previous_container:
|
|
|
- old_volumes = get_container_data_volumes(previous_container, volumes, tmpfs)
|
|
|
+ old_volumes, old_mounts = get_container_data_volumes(
|
|
|
+ previous_container, volumes, tmpfs, mounts
|
|
|
+ )
|
|
|
warn_on_masked_volume(volumes, old_volumes, previous_container.service)
|
|
|
volume_bindings.update(
|
|
|
- build_volume_binding(volume) for volume in old_volumes)
|
|
|
+ build_volume_binding(volume) for volume in old_volumes
|
|
|
+ )
|
|
|
|
|
|
- if old_volumes:
|
|
|
+ if old_volumes or old_mounts:
|
|
|
affinity = {'affinity:container': '=' + previous_container.id}
|
|
|
|
|
|
return list(volume_bindings.values()), affinity
|
|
|
|
|
|
|
|
|
-def get_container_data_volumes(container, volumes_option, tmpfs_option):
|
|
|
- """Find the container data volumes that are in `volumes_option`, and return
|
|
|
- a mapping of volume bindings for those volumes.
|
|
|
+def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_option):
|
|
|
+ """
|
|
|
+ Find the container data volumes that are in `volumes_option`, and return
|
|
|
+ a mapping of volume bindings for those volumes.
|
|
|
+ Anonymous volume mounts are updated in place instead.
|
|
|
"""
|
|
|
volumes = []
|
|
|
volumes_option = volumes_option or []
|
|
|
@@ -1284,7 +1326,19 @@ def get_container_data_volumes(container, volumes_option, tmpfs_option):
|
|
|
volume = volume._replace(external=mount['Name'])
|
|
|
volumes.append(volume)
|
|
|
|
|
|
- return volumes
|
|
|
+ updated_mounts = False
|
|
|
+ for mount in mounts_option:
|
|
|
+ if mount.type != 'volume':
|
|
|
+ continue
|
|
|
+
|
|
|
+ ctnr_mount = container_mounts.get(mount.target)
|
|
|
+        if not ctnr_mount or not ctnr_mount.get('Name'):
|
|
|
+ continue
|
|
|
+
|
|
|
+ mount.source = ctnr_mount['Name']
|
|
|
+ updated_mounts = True
|
|
|
+
|
|
|
+ return volumes, updated_mounts
|
|
|
|
|
|
|
|
|
def warn_on_masked_volume(volumes_option, container_volumes, service):
|
|
|
@@ -1331,6 +1385,18 @@ def build_volume_from(volume_from_spec):
|
|
|
return "{}:{}".format(volume_from_spec.source.id, volume_from_spec.mode)
|
|
|
|
|
|
|
|
|
+def build_mount(mount_spec):
|
|
|
+ kwargs = {}
|
|
|
+ if mount_spec.options:
|
|
|
+ for option, sdk_name in mount_spec.options_map[mount_spec.type].items():
|
|
|
+ if option in mount_spec.options:
|
|
|
+ kwargs[sdk_name] = mount_spec.options[option]
|
|
|
+
|
|
|
+ return Mount(
|
|
|
+ type=mount_spec.type, target=mount_spec.target, source=mount_spec.source,
|
|
|
+ read_only=mount_spec.read_only, consistency=mount_spec.consistency, **kwargs
|
|
|
+ )
|
|
|
+
|
|
|
# Labels
|
|
|
|
|
|
|