
Merge pull request #5449 from docker/bump-1.18.0-rc1

Bump 1.18.0 RC1
Joffrey F, 8 years ago
parent
commit 8fc6ac1899
53 changed files with 1650 additions and 280 deletions
  1. .travis.yml (+1 -0)
  2. CHANGELOG.md (+80 -0)
  3. CONTRIBUTING.md (+2 -2)
  4. compose/__init__.py (+1 -1)
  5. compose/cli/main.py (+59 -26)
  6. compose/config/__init__.py (+2 -0)
  7. compose/config/config.py (+45 -44)
  8. compose/config/config_schema_v2.1.json (+3 -1)
  9. compose/config/config_schema_v2.2.json (+3 -1)
  10. compose/config/config_schema_v2.3.json (+38 -3)
  11. compose/config/config_schema_v3.0.json (+1 -1)
  12. compose/config/config_schema_v3.1.json (+1 -1)
  13. compose/config/config_schema_v3.2.json (+2 -1)
  14. compose/config/config_schema_v3.3.json (+2 -1)
  15. compose/config/config_schema_v3.4.json (+2 -1)
  16. compose/config/config_schema_v3.5.json (+45 -14)
  17. compose/config/environment.py (+1 -1)
  18. compose/config/interpolation.py (+92 -6)
  19. compose/config/serialize.py (+15 -1)
  20. compose/config/types.py (+71 -7)
  21. compose/config/validation.py (+60 -1)
  22. compose/network.py (+14 -11)
  23. compose/parallel.py (+16 -7)
  24. compose/project.py (+30 -6)
  25. compose/service.py (+102 -36)
  26. compose/utils.py (+1 -1)
  27. compose/volume.py (+7 -2)
  28. contrib/completion/bash/docker-compose (+3 -3)
  29. contrib/completion/zsh/_docker-compose (+1 -0)
  30. docker-compose.spec (+5 -0)
  31. project/RELEASE-PROCESS.md (+1 -1)
  32. requirements.txt (+3 -3)
  33. script/release/download-binaries (+5 -0)
  34. script/run/run.sh (+1 -1)
  35. setup.py (+3 -3)
  36. tests/acceptance/cli_test.py (+100 -20)
  37. tests/fixtures/build-memory/Dockerfile (+4 -0)
  38. tests/fixtures/build-memory/docker-compose.yml (+6 -0)
  39. tests/fixtures/environment-exec/docker-compose.yml (+10 -0)
  40. tests/fixtures/environment-interpolation-with-defaults/docker-compose.yml (+13 -0)
  41. tests/fixtures/networks/external-networks-v3-5.yml (+17 -0)
  42. tests/fixtures/run-labels/docker-compose.yml (+7 -0)
  43. tests/helpers.py (+8 -5)
  44. tests/integration/project_test.py (+58 -0)
  45. tests/integration/service_test.py (+112 -2)
  46. tests/integration/testcases.py (+3 -3)
  47. tests/unit/cli_test.py (+7 -0)
  48. tests/unit/config/config_test.py (+311 -43)
  49. tests/unit/config/environment_test.py (+14 -0)
  50. tests/unit/config/interpolation_test.py (+195 -3)
  51. tests/unit/config/types_test.py (+26 -0)
  52. tests/unit/service_test.py (+41 -16)
  53. tox.ini (+0 -1)

+ 1 - 0
.travis.yml

@@ -8,6 +8,7 @@ matrix:
       services:
       - docker
     - os: osx
+      osx_image: xcode7.3
       language: generic
 
 install: ./script/travis/install

+ 80 - 0
CHANGELOG.md

@@ -1,6 +1,86 @@
 Change log
 ==========
 
+1.18.0 (2017-12-15)
+-------------------
+
+### New features
+
+#### Compose file version 3.5
+
+- Introduced version 3.5 of the `docker-compose.yml` specification.
+  This version requires Docker Engine 17.06.0 or above
+
+- Added support for the `shm_size` parameter in build configurations
+
+- Added support for the `isolation` parameter in service definitions
+
+- Added support for custom names for network, secret and config definitions
+
+#### Compose file version 2.3
+
+- Added support for `extra_hosts` in build configuration
+
+- Added support for the
+  [long syntax](https://docs.docker.com/compose/compose-file/#long-syntax-3)
+  for volume entries, as previously introduced in the 3.2 format.
+  Note that using this syntax will create
+  [mounts](https://docs.docker.com/engine/admin/volumes/bind-mounts/)
+  instead of volumes.
+
+#### Compose file version 2.1 and up
+
+- Added support for the `oom_kill_disable` parameter in service definitions
+  (2.x only)
+
+- Added support for custom names for network, secret and config definitions
+  (2.x only)
+
+
+#### All formats
+
+- Values interpolated from the environment will now be converted to the
+  proper type when used in non-string fields.
+
+- Added support for `--labels` in `docker-compose run`
+
+- Added support for `--timeout` in `docker-compose down`
+
+- Added support for `--memory` in `docker-compose build`
+
+- Setting `stop_grace_period` in service definitions now also sets the
+  container's `stop_timeout`
+
+### Bugfixes
+
+- Fixed an issue where Compose was still handling service hostname according
+  to legacy engine behavior, causing hostnames containing dots to be cut up
+
+- Fixed a bug where the `X-Y:Z` syntax for ports was considered invalid
+  by Compose
+
+- Fixed an issue with CLI logging causing duplicate messages and inelegant
+  output to occur
+
+- Fixed a bug where the valid `${VAR:-}` syntax would cause Compose to
+  error out
+
+- Fixed a bug where `env_file` entries using a UTF-8 BOM were being read
+  incorrectly
+
+- Fixed a bug where missing secret files would generate an empty directory
+  in their place
+
+- Added validation for the `test` field in healthchecks
+
+- Added validation for the `subnet` field in IPAM configurations
+
+- Added validation for `volumes` properties when using the long syntax in
+  service definitions
+
+- The CLI now explicitly prevents using `-d` and `--timeout` together
+  in `docker-compose up`
+
 1.17.1 (2017-11-08)
 ------------------
 

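The `${VAR:-}` fix called out in the changelog comes from loosening the interpolation template's idpattern (see the compose/config/interpolation.py hunk further down, where the trailing `[^}]+` becomes `[^}]*`). Below is a minimal, standalone sketch of the idea; the substitute() override here is a simplified stand-in, not the actual implementation from compose:

    from string import Template

    class TemplateWithDefaults(Template):
        # The relaxed idpattern accepts an empty default, so ${VAR:-} no longer errors out.
        idpattern = r'[_a-z][_a-z0-9]*(?::?-[^}]*)?'

        # Simplified stand-in for the overridden substitute(); the real method is
        # adapted from python2.7/string.py and also handles the ${VAR-default} form.
        def substitute(self, mapping):
            def convert(mo):
                named = mo.group('named') or mo.group('braced')
                if named is None:
                    return mo.group()
                if ':-' in named:
                    var, _, default = named.partition(':-')
                    return mapping.get(var) or default
                return str(mapping.get(named, ''))
            return self.pattern.sub(convert, self.template)

    print(TemplateWithDefaults('${HOST:-localhost}:${PORT:-}').substitute({'PORT': '8080'}))
    # localhost:8080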
+ 2 - 2
CONTRIBUTING.md

@@ -64,8 +64,8 @@ you can specify a test directory, file, module, class or method:
 
     $ script/test/default tests/unit
     $ script/test/default tests/unit/cli_test.py
-    $ script/test/default tests/unit/config_test.py::ConfigTest
-    $ script/test/default tests/unit/config_test.py::ConfigTest::test_load
+    $ script/test/default tests/unit/config/config_test.py::ConfigTest
+    $ script/test/default tests/unit/config/config_test.py::ConfigTest::test_load
 
 ## Finding things to work on
 

+ 1 - 1
compose/__init__.py

@@ -1,4 +1,4 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
-__version__ = '1.17.1'
+__version__ = '1.18.0-rc1'

+ 59 - 26
compose/cli/main.py

@@ -14,6 +14,8 @@ from distutils.spawn import find_executable
 from inspect import getdoc
 from inspect import getdoc
 from operator import attrgetter
 from operator import attrgetter
 
 
+import docker
+
 from . import errors
 from . import errors
 from . import signals
 from . import signals
 from .. import __version__
 from .. import __version__
@@ -22,6 +24,7 @@ from ..bundle import MissingDigests
 from ..bundle import serialize_bundle
 from ..bundle import serialize_bundle
 from ..config import ConfigurationError
 from ..config import ConfigurationError
 from ..config import parse_environment
 from ..config import parse_environment
+from ..config import parse_labels
 from ..config import resolve_build_args
 from ..config import resolve_build_args
 from ..config.environment import Environment
 from ..config.environment import Environment
 from ..config.serialize import serialize_config
 from ..config.serialize import serialize_config
@@ -230,6 +233,7 @@ class TopLevelCommand(object):
             --force-rm              Always remove intermediate containers.
             --force-rm              Always remove intermediate containers.
             --no-cache              Do not use cache when building the image.
             --no-cache              Do not use cache when building the image.
             --pull                  Always attempt to pull a newer version of the image.
             --pull                  Always attempt to pull a newer version of the image.
+            -m, --memory MEM        Sets memory limit for the build container.
             --build-arg key=val     Set build-time variables for one service.
             --build-arg key=val     Set build-time variables for one service.
         """
         """
         service_names = options['SERVICE']
         service_names = options['SERVICE']
@@ -246,6 +250,7 @@ class TopLevelCommand(object):
             no_cache=bool(options.get('--no-cache', False)),
             no_cache=bool(options.get('--no-cache', False)),
             pull=bool(options.get('--pull', False)),
             pull=bool(options.get('--pull', False)),
             force_rm=bool(options.get('--force-rm', False)),
             force_rm=bool(options.get('--force-rm', False)),
+            memory=options.get('--memory'),
             build_args=build_args)
             build_args=build_args)
 
 
     def bundle(self, config_options, options):
     def bundle(self, config_options, options):
@@ -369,9 +374,12 @@ class TopLevelCommand(object):
                                 attached to containers.
                                 attached to containers.
             --remove-orphans    Remove containers for services not defined in the
             --remove-orphans    Remove containers for services not defined in the
                                 Compose file
                                 Compose file
+            -t, --timeout TIMEOUT      Specify a shutdown timeout in seconds.
+                                     (default: 10)
         """
         """
         image_type = image_type_from_opt('--rmi', options['--rmi'])
         image_type = image_type_from_opt('--rmi', options['--rmi'])
-        self.project.down(image_type, options['--volumes'], options['--remove-orphans'])
+        timeout = timeout_from_opts(options)
+        self.project.down(image_type, options['--volumes'], options['--remove-orphans'], timeout=timeout)
 
 
     def events(self, options):
     def events(self, options):
         """
         """
@@ -402,7 +410,7 @@ class TopLevelCommand(object):
         """
         """
         Execute a command in a running container
         Execute a command in a running container
 
 
-        Usage: exec [options] SERVICE COMMAND [ARGS...]
+        Usage: exec [options] [-e KEY=VAL...] SERVICE COMMAND [ARGS...]
 
 
         Options:
         Options:
             -d                Detached mode: Run command in the background.
             -d                Detached mode: Run command in the background.
@@ -412,11 +420,16 @@ class TopLevelCommand(object):
                               allocates a TTY.
                               allocates a TTY.
             --index=index     index of the container if there are multiple
             --index=index     index of the container if there are multiple
                               instances of a service [default: 1]
                               instances of a service [default: 1]
+            -e, --env KEY=VAL Set environment variables (can be used multiple times,
+                              not supported in API < 1.25)
         """
         """
         index = int(options.get('--index'))
         index = int(options.get('--index'))
         service = self.project.get_service(options['SERVICE'])
         service = self.project.get_service(options['SERVICE'])
         detach = options['-d']
         detach = options['-d']
 
 
+        if options['--env'] and docker.utils.version_lt(self.project.client.api_version, '1.25'):
+            raise UserError("Setting environment for exec is not supported in API < 1.25")
+
         try:
         try:
             container = service.get_container(number=index)
             container = service.get_container(number=index)
         except ValueError as e:
         except ValueError as e:
@@ -425,26 +438,7 @@ class TopLevelCommand(object):
         tty = not options["-T"]
         tty = not options["-T"]
 
 
         if IS_WINDOWS_PLATFORM and not detach:
         if IS_WINDOWS_PLATFORM and not detach:
-            args = ["exec"]
-
-            if options["-d"]:
-                args += ["--detach"]
-            else:
-                args += ["--interactive"]
-
-            if not options["-T"]:
-                args += ["--tty"]
-
-            if options["--privileged"]:
-                args += ["--privileged"]
-
-            if options["--user"]:
-                args += ["--user", options["--user"]]
-
-            args += [container.id]
-            args += command
-
-            sys.exit(call_docker(args))
+            sys.exit(call_docker(build_exec_command(options, container.id, command)))
 
 
         create_exec_options = {
         create_exec_options = {
             "privileged": options["--privileged"],
             "privileged": options["--privileged"],
@@ -453,6 +447,9 @@ class TopLevelCommand(object):
             "stdin": tty,
             "stdin": tty,
         }
         }
 
 
+        if docker.utils.version_gte(self.project.client.api_version, '1.25'):
+            create_exec_options["environment"] = options["--env"]
+
         exec_id = container.create_exec(command, **create_exec_options)
         exec_id = container.create_exec(command, **create_exec_options)
 
 
         if detach:
         if detach:
@@ -729,7 +726,9 @@ class TopLevelCommand(object):
         running. If you do not want to start linked services, use
         running. If you do not want to start linked services, use
         `docker-compose run --no-deps SERVICE COMMAND [ARGS...]`.
         `docker-compose run --no-deps SERVICE COMMAND [ARGS...]`.
 
 
-        Usage: run [options] [-v VOLUME...] [-p PORT...] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
+        Usage:
+            run [options] [-v VOLUME...] [-p PORT...] [-e KEY=VAL...] [-l KEY=VALUE...]
+                SERVICE [COMMAND] [ARGS...]
 
 
         Options:
         Options:
             -d                    Detached mode: Run container in the background, print
             -d                    Detached mode: Run container in the background, print
@@ -737,6 +736,7 @@ class TopLevelCommand(object):
             --name NAME           Assign a name to the container
             --name NAME           Assign a name to the container
             --entrypoint CMD      Override the entrypoint of the image.
             --entrypoint CMD      Override the entrypoint of the image.
             -e KEY=VAL            Set an environment variable (can be used multiple times)
             -e KEY=VAL            Set an environment variable (can be used multiple times)
+            -l, --label KEY=VAL   Add or override a label (can be used multiple times)
             -u, --user=""         Run as specified username or uid
             -u, --user=""         Run as specified username or uid
             --no-deps             Don't start linked services.
             --no-deps             Don't start linked services.
             --rm                  Remove container after run. Ignored in detached mode.
             --rm                  Remove container after run. Ignored in detached mode.
@@ -898,8 +898,8 @@ class TopLevelCommand(object):
 
 
         Options:
         Options:
             -d                         Detached mode: Run containers in the background,
             -d                         Detached mode: Run containers in the background,
-                                       print new container names.
-                                       Incompatible with --abort-on-container-exit.
+                                       print new container names. Incompatible with
+                                       --abort-on-container-exit and --timeout.
             --no-color                 Produce monochrome output.
             --no-color                 Produce monochrome output.
             --no-deps                  Don't start linked services.
             --no-deps                  Don't start linked services.
             --force-recreate           Recreate containers even if their configuration
             --force-recreate           Recreate containers even if their configuration
@@ -913,7 +913,8 @@ class TopLevelCommand(object):
             --abort-on-container-exit  Stops all containers if any container was stopped.
             --abort-on-container-exit  Stops all containers if any container was stopped.
                                        Incompatible with -d.
                                        Incompatible with -d.
             -t, --timeout TIMEOUT      Use this timeout in seconds for container shutdown
             -t, --timeout TIMEOUT      Use this timeout in seconds for container shutdown
-                                       when attached or when containers are already
+                                       when attached or when containers are already.
+                                       Incompatible with -d.
                                        running. (default: 10)
                                        running. (default: 10)
             --remove-orphans           Remove containers for services not
             --remove-orphans           Remove containers for services not
                                        defined in the Compose file
                                        defined in the Compose file
@@ -934,6 +935,9 @@ class TopLevelCommand(object):
         if detached and (cascade_stop or exit_value_from):
         if detached and (cascade_stop or exit_value_from):
             raise UserError("--abort-on-container-exit and -d cannot be combined.")
             raise UserError("--abort-on-container-exit and -d cannot be combined.")
 
 
+        if detached and timeout:
+            raise UserError("-d and --timeout cannot be combined.")
+
         if no_start:
         if no_start:
             for excluded in ['-d', '--abort-on-container-exit', '--exit-code-from']:
             for excluded in ['-d', '--abort-on-container-exit', '--exit-code-from']:
                 if options.get(excluded):
                 if options.get(excluded):
@@ -1131,6 +1135,9 @@ def build_container_options(options, detach, command):
             parse_environment(options['-e'])
             parse_environment(options['-e'])
         )
         )
 
 
+    if options['--label']:
+        container_options['labels'] = parse_labels(options['--label'])
+
     if options['--entrypoint']:
     if options['--entrypoint']:
         container_options['entrypoint'] = options.get('--entrypoint')
         container_options['entrypoint'] = options.get('--entrypoint')
 
 
@@ -1295,3 +1302,29 @@ def parse_scale_args(options):
             )
             )
         res[service_name] = num
         res[service_name] = num
     return res
     return res
+
+
+def build_exec_command(options, container_id, command):
+    args = ["exec"]
+
+    if options["-d"]:
+        args += ["--detach"]
+    else:
+        args += ["--interactive"]
+
+    if not options["-T"]:
+        args += ["--tty"]
+
+    if options["--privileged"]:
+        args += ["--privileged"]
+
+    if options["--user"]:
+        args += ["--user", options["--user"]]
+
+    if options["--env"]:
+        for env_variable in options["--env"]:
+            args += ["--env", env_variable]
+
+    args += [container_id]
+    args += command
+    return args

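The Windows-only shell-out path in `exec` is now factored into the `build_exec_command()` helper shown at the end of this hunk. A small usage sketch (the options dict mimics what docopt hands to main.py; importing the helper assumes docker-compose 1.18.0-rc1 or later is installed):

    # Requires docker-compose >= 1.18.0-rc1 for the new helper to exist.
    from compose.cli.main import build_exec_command

    options = {
        '-d': False,
        '-T': False,
        '--privileged': False,
        '--user': 'postgres',
        '--env': ['PGUSER=admin', 'PGPASSWORD=secret'],
    }
    print(build_exec_command(options, 'abc123', ['psql', '-l']))
    # ['exec', '--interactive', '--tty', '--user', 'postgres',
    #  '--env', 'PGUSER=admin', '--env', 'PGPASSWORD=secret', 'abc123', 'psql', '-l']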
+ 2 - 0
compose/config/__init__.py

@@ -8,5 +8,7 @@ from .config import DOCKER_CONFIG_KEYS
 from .config import find
 from .config import load
 from .config import merge_environment
+from .config import merge_labels
 from .config import parse_environment
+from .config import parse_labels
 from .config import resolve_build_args

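`merge_labels` (defined in config.py below and re-exported here) merges the labels of a base file and an override file, with the override winning; both the list and dict forms are accepted by `parse_labels`. A short sketch, assuming docker-compose 1.18.0-rc1 or later is installed:

    from compose.config import merge_labels

    base = ['com.example.tier=backend', 'com.example.owner=ops']
    override = {'com.example.owner': 'platform'}
    print(merge_labels(base, override))
    # {'com.example.tier': 'backend', 'com.example.owner': 'platform'}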
+ 45 - 44
compose/config/config.py

@@ -35,6 +35,7 @@ from .interpolation import interpolate_environment_variables
 from .sort_services import get_container_name_from_network_mode
 from .sort_services import get_container_name_from_network_mode
 from .sort_services import get_service_name_from_network_mode
 from .sort_services import get_service_name_from_network_mode
 from .sort_services import sort_service_dicts
 from .sort_services import sort_service_dicts
+from .types import MountSpec
 from .types import parse_extra_hosts
 from .types import parse_extra_hosts
 from .types import parse_restart_spec
 from .types import parse_restart_spec
 from .types import ServiceLink
 from .types import ServiceLink
@@ -47,6 +48,7 @@ from .validation import validate_config_section
 from .validation import validate_cpu
 from .validation import validate_cpu
 from .validation import validate_depends_on
 from .validation import validate_depends_on
 from .validation import validate_extends_file_path
 from .validation import validate_extends_file_path
+from .validation import validate_healthcheck
 from .validation import validate_links
 from .validation import validate_links
 from .validation import validate_network_mode
 from .validation import validate_network_mode
 from .validation import validate_pid_mode
 from .validation import validate_pid_mode
@@ -90,6 +92,7 @@ DOCKER_CONFIG_KEYS = [
     'mem_swappiness',
     'mem_swappiness',
     'net',
     'net',
     'oom_score_adj',
     'oom_score_adj',
+    'oom_kill_disable',
     'pid',
     'pid',
     'ports',
     'ports',
     'privileged',
     'privileged',
@@ -407,12 +410,11 @@ def load_mapping(config_files, get_func, entity_type, working_dir=None):
 
 
             external = config.get('external')
             external = config.get('external')
             if external:
             if external:
-                name_field = 'name' if entity_type == 'Volume' else 'external_name'
                 validate_external(entity_type, name, config, config_file.version)
                 validate_external(entity_type, name, config, config_file.version)
                 if isinstance(external, dict):
                 if isinstance(external, dict):
-                    config[name_field] = external.get('name')
+                    config['name'] = external.get('name')
                 elif not config.get('name'):
                 elif not config.get('name'):
-                    config[name_field] = name
+                    config['name'] = name
 
 
             if 'driver_opts' in config:
             if 'driver_opts' in config:
                 config['driver_opts'] = build_string_dict(
                 config['driver_opts'] = build_string_dict(
@@ -519,13 +521,13 @@ def process_config_file(config_file, environment, service_name=None):
             processed_config['secrets'] = interpolate_config_section(
             processed_config['secrets'] = interpolate_config_section(
                 config_file,
                 config_file,
                 config_file.get_secrets(),
                 config_file.get_secrets(),
-                'secrets',
+                'secret',
                 environment)
                 environment)
         if config_file.version >= const.COMPOSEFILE_V3_3:
         if config_file.version >= const.COMPOSEFILE_V3_3:
             processed_config['configs'] = interpolate_config_section(
             processed_config['configs'] = interpolate_config_section(
                 config_file,
                 config_file,
                 config_file.get_configs(),
                 config_file.get_configs(),
-                'configs',
+                'config',
                 environment
                 environment
             )
             )
     else:
     else:
@@ -686,6 +688,7 @@ def validate_service(service_config, service_names, config_file):
     validate_pid_mode(service_config, service_names)
     validate_pid_mode(service_config, service_names)
     validate_depends_on(service_config, service_names)
     validate_depends_on(service_config, service_names)
     validate_links(service_config, service_names)
     validate_links(service_config, service_names)
+    validate_healthcheck(service_config)
 
 
     if not service_dict.get('image') and has_uppercase(service_name):
     if not service_dict.get('image') and has_uppercase(service_name):
         raise ConfigurationError(
         raise ConfigurationError(
@@ -724,7 +727,7 @@ def process_service(service_config):
             service_dict[field] = to_list(service_dict[field])
             service_dict[field] = to_list(service_dict[field])
 
 
     service_dict = process_blkio_config(process_ports(
     service_dict = process_blkio_config(process_ports(
-        process_healthcheck(service_dict, service_config.name)
+        process_healthcheck(service_dict)
     ))
     ))
 
 
     return service_dict
     return service_dict
@@ -788,33 +791,35 @@ def process_blkio_config(service_dict):
     return service_dict
     return service_dict
 
 
 
 
-def process_healthcheck(service_dict, service_name):
+def process_healthcheck(service_dict):
     if 'healthcheck' not in service_dict:
     if 'healthcheck' not in service_dict:
         return service_dict
         return service_dict
 
 
-    hc = {}
-    raw = service_dict['healthcheck']
+    hc = service_dict['healthcheck']
 
 
-    if raw.get('disable'):
-        if len(raw) > 1:
-            raise ConfigurationError(
-                'Service "{}" defines an invalid healthcheck: '
-                '"disable: true" cannot be combined with other options'
-                .format(service_name))
+    if 'disable' in hc:
+        del hc['disable']
         hc['test'] = ['NONE']
         hc['test'] = ['NONE']
-    elif 'test' in raw:
-        hc['test'] = raw['test']
 
 
     for field in ['interval', 'timeout', 'start_period']:
     for field in ['interval', 'timeout', 'start_period']:
-        if field in raw:
-            if not isinstance(raw[field], six.integer_types):
-                hc[field] = parse_nanoseconds_int(raw[field])
-            else:  # Conversion has been done previously
-                hc[field] = raw[field]
-    if 'retries' in raw:
-        hc['retries'] = raw['retries']
-
-    service_dict['healthcheck'] = hc
+        if field not in hc or isinstance(hc[field], six.integer_types):
+            continue
+        hc[field] = parse_nanoseconds_int(hc[field])
+
+    return service_dict
+
+
+def finalize_service_volumes(service_dict, environment):
+    if 'volumes' in service_dict:
+        finalized_volumes = []
+        normalize = environment.get_boolean('COMPOSE_CONVERT_WINDOWS_PATHS')
+        for v in service_dict['volumes']:
+            if isinstance(v, dict):
+                finalized_volumes.append(MountSpec.parse(v, normalize))
+            else:
+                finalized_volumes.append(VolumeSpec.parse(v, normalize))
+        service_dict['volumes'] = finalized_volumes
+
     return service_dict
     return service_dict
 
 
 
 
@@ -831,12 +836,7 @@ def finalize_service(service_config, service_names, version, environment):
             for vf in service_dict['volumes_from']
             for vf in service_dict['volumes_from']
         ]
         ]
 
 
-    if 'volumes' in service_dict:
-        service_dict['volumes'] = [
-            VolumeSpec.parse(
-                v, environment.get_boolean('COMPOSE_CONVERT_WINDOWS_PATHS')
-            ) for v in service_dict['volumes']
-        ]
+    service_dict = finalize_service_volumes(service_dict, environment)
 
 
     if 'net' in service_dict:
     if 'net' in service_dict:
         network_mode = service_dict.pop('net')
         network_mode = service_dict.pop('net')
@@ -1032,6 +1032,7 @@ def merge_build(output, base, override):
     md.merge_mapping('args', parse_build_arguments)
     md.merge_mapping('args', parse_build_arguments)
     md.merge_field('cache_from', merge_unique_items_lists, default=[])
     md.merge_field('cache_from', merge_unique_items_lists, default=[])
     md.merge_mapping('labels', parse_labels)
     md.merge_mapping('labels', parse_labels)
+    md.merge_mapping('extra_hosts', parse_extra_hosts)
     return dict(md)
     return dict(md)
 
 
 
 
@@ -1084,6 +1085,12 @@ def merge_environment(base, override):
     return env
     return env
 
 
 
 
+def merge_labels(base, override):
+    labels = parse_labels(base)
+    labels.update(parse_labels(override))
+    return labels
+
+
 def split_kv(kvpair):
 def split_kv(kvpair):
     if '=' in kvpair:
     if '=' in kvpair:
         return kvpair.split('=', 1)
         return kvpair.split('=', 1)
@@ -1145,19 +1152,13 @@ def resolve_volume_paths(working_dir, service_dict):
 
 
 
 
 def resolve_volume_path(working_dir, volume):
 def resolve_volume_path(working_dir, volume):
-    mount_params = None
     if isinstance(volume, dict):
     if isinstance(volume, dict):
-        container_path = volume.get('target')
-        host_path = volume.get('source')
-        mode = None
-        if host_path:
-            if volume.get('read_only'):
-                mode = 'ro'
-            if volume.get('volume', {}).get('nocopy'):
-                mode = 'nocopy'
-        mount_params = (host_path, mode)
-    else:
-        container_path, mount_params = split_path_mapping(volume)
+        if volume.get('source', '').startswith('.') and volume['type'] == 'mount':
+            volume['source'] = expand_path(working_dir, volume['source'])
+        return volume
+
+    mount_params = None
+    container_path, mount_params = split_path_mapping(volume)
 
 
     if mount_params is not None:
     if mount_params is not None:
         host_path, mode = mount_params
         host_path, mode = mount_params

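The new dict branch in `resolve_volume_path()` expands a relative `source` against the project directory when the long-syntax entry's type is `mount`, instead of collapsing the entry into a (host_path, mode) tuple as before. A standalone sketch of that branch; `expand_path` below is a stand-in that mirrors the helper used elsewhere in compose/config/config.py:

    import os.path

    def expand_path(working_dir, path):
        # Stand-in for compose's expand_path helper: resolve against the project dir.
        return os.path.abspath(os.path.join(working_dir, os.path.expanduser(path)))

    volume = {'type': 'mount', 'source': './data', 'target': '/data', 'read_only': True}
    if volume.get('source', '').startswith('.') and volume['type'] == 'mount':
        volume['source'] = expand_path('/srv/myproject', volume['source'])
    print(volume['source'])   # /srv/myproject/data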
+ 3 - 1
compose/config/config_schema_v2.1.json

@@ -229,6 +229,7 @@
             }
             }
           ]
           ]
         },
         },
+        "oom_kill_disable": {"type": "boolean"},
         "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
         "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
         "group_add": {
         "group_add": {
             "type": "array",
             "type": "array",
@@ -349,7 +350,8 @@
         },
         },
         "internal": {"type": "boolean"},
         "internal": {"type": "boolean"},
         "enable_ipv6": {"type": "boolean"},
         "enable_ipv6": {"type": "boolean"},
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "name": {"type": "string"}
       },
       },
       "additionalProperties": false
       "additionalProperties": false
     },
     },

+ 3 - 1
compose/config/config_schema_v2.2.json

@@ -235,6 +235,7 @@
             }
             }
           ]
           ]
         },
         },
+        "oom_kill_disable": {"type": "boolean"},
         "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
         "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
         "group_add": {
         "group_add": {
             "type": "array",
             "type": "array",
@@ -356,7 +357,8 @@
         },
         },
         "internal": {"type": "boolean"},
         "internal": {"type": "boolean"},
         "enable_ipv6": {"type": "boolean"},
         "enable_ipv6": {"type": "boolean"},
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "name": {"type": "string"}
       },
       },
       "additionalProperties": false
       "additionalProperties": false
     },
     },

+ 38 - 3
compose/config/config_schema_v2.3.json

@@ -92,7 +92,8 @@
                 "cache_from": {"$ref": "#/definitions/list_of_strings"},
                 "cache_from": {"$ref": "#/definitions/list_of_strings"},
                 "network": {"type": "string"},
                 "network": {"type": "string"},
                 "target": {"type": "string"},
                 "target": {"type": "string"},
-                "shm_size": {"type": ["integer", "string"]}
+                "shm_size": {"type": ["integer", "string"]},
+                "extra_hosts": {"$ref": "#/definitions/list_or_dict"}
               },
               },
               "additionalProperties": false
               "additionalProperties": false
             }
             }
@@ -237,6 +238,7 @@
             }
             }
           ]
           ]
         },
         },
+        "oom_kill_disable": {"type": "boolean"},
         "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
         "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
         "group_add": {
         "group_add": {
             "type": "array",
             "type": "array",
@@ -291,7 +293,39 @@
         },
         },
         "user": {"type": "string"},
         "user": {"type": "string"},
         "userns_mode": {"type": "string"},
         "userns_mode": {"type": "string"},
-        "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+        "volumes": {
+          "type": "array",
+          "items": {
+            "oneOf": [
+              {"type": "string"},
+              {
+                "type": "object",
+                "required": ["type"],
+                "additionalProperties": false,
+                "properties": {
+                  "type": {"type": "string"},
+                  "source": {"type": "string"},
+                  "target": {"type": "string"},
+                  "read_only": {"type": "boolean"},
+                  "consistency": {"type": "string"},
+                  "bind": {
+                    "type": "object",
+                    "properties": {
+                      "propagation": {"type": "string"}
+                    }
+                  },
+                  "volume": {
+                    "type": "object",
+                    "properties": {
+                      "nocopy": {"type": "boolean"}
+                    }
+                  }
+                }
+              }
+            ],
+            "uniqueItems": true
+          }
+        },
         "volume_driver": {"type": "string"},
         "volume_driver": {"type": "string"},
         "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
         "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
         "working_dir": {"type": "string"}
         "working_dir": {"type": "string"}
@@ -359,7 +393,8 @@
         },
         },
         "internal": {"type": "boolean"},
         "internal": {"type": "boolean"},
         "enable_ipv6": {"type": "boolean"},
         "enable_ipv6": {"type": "boolean"},
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "name": {"type": "string"}
       },
       },
       "additionalProperties": false
       "additionalProperties": false
     },
     },
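To illustrate what the long-syntax `volumes` entries added to the 2.3 schema accept, the sketch below copies just the object variant from this hunk and validates two sample entries with jsonschema (already a compose dependency); this is an illustration, not how compose loads its schema files:

    from jsonschema import Draft4Validator

    volume_entry = {
        "type": "object",
        "required": ["type"],
        "additionalProperties": False,
        "properties": {
            "type": {"type": "string"},
            "source": {"type": "string"},
            "target": {"type": "string"},
            "read_only": {"type": "boolean"},
            "consistency": {"type": "string"},
            "bind": {"type": "object", "properties": {"propagation": {"type": "string"}}},
            "volume": {"type": "object", "properties": {"nocopy": {"type": "boolean"}}},
        },
    }

    validator = Draft4Validator(volume_entry)
    print(validator.is_valid({"type": "bind", "source": "./conf", "target": "/etc/conf"}))  # True
    print(validator.is_valid({"type": "volume", "driver": "local"}))  # False: unknown property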

+ 1 - 1
compose/config/config_schema_v3.0.json

@@ -294,7 +294,7 @@
               "items": {
               "items": {
                 "type": "object",
                 "type": "object",
                 "properties": {
                 "properties": {
-                  "subnet": {"type": "string"}
+                  "subnet": {"type": "string", "format": "subnet_ip_address"}
                 },
                 },
                 "additionalProperties": false
                 "additionalProperties": false
               }
               }

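The `subnet` fields across all schema versions now carry the custom `subnet_ip_address` format, whose checker lives in compose/config/validation.py (its new constants appear, truncated, at the end of this section). The IPv4 half of that check, copied from those constants, behaves like this:

    import re

    VALID_IPV4_SEG = r'(\d{1,2}|1\d{2}|2[0-4]\d|25[0-5])'
    VALID_IPV4_ADDR = r'({IPV4_SEG}\.){{3}}{IPV4_SEG}'.format(IPV4_SEG=VALID_IPV4_SEG)
    VALID_REGEX_IPV4_CIDR = r'^{IPV4_ADDR}/(\d|[1-2]\d|3[0-2])$'.format(IPV4_ADDR=VALID_IPV4_ADDR)

    for subnet in ('172.16.238.0/24', '172.16.238.0', '300.1.1.0/24'):
        print(subnet, bool(re.match(VALID_REGEX_IPV4_CIDR, subnet)))
    # 172.16.238.0/24 True, 172.16.238.0 False, 300.1.1.0/24 False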
+ 1 - 1
compose/config/config_schema_v3.1.json

@@ -323,7 +323,7 @@
               "items": {
               "items": {
                 "type": "object",
                 "type": "object",
                 "properties": {
                 "properties": {
-                  "subnet": {"type": "string"}
+                  "subnet": {"type": "string", "format": "subnet_ip_address"}
                 },
                 },
                 "additionalProperties": false
                 "additionalProperties": false
               }
               }

+ 2 - 1
compose/config/config_schema_v3.2.json

@@ -245,6 +245,7 @@
               {
               {
                 "type": "object",
                 "type": "object",
                 "required": ["type"],
                 "required": ["type"],
+                "additionalProperties": false,
                 "properties": {
                 "properties": {
                   "type": {"type": "string"},
                   "type": {"type": "string"},
                   "source": {"type": "string"},
                   "source": {"type": "string"},
@@ -369,7 +370,7 @@
               "items": {
               "items": {
                 "type": "object",
                 "type": "object",
                 "properties": {
                 "properties": {
-                  "subnet": {"type": "string"}
+                  "subnet": {"type": "string", "format": "subnet_ip_address"}
                 },
                 },
                 "additionalProperties": false
                 "additionalProperties": false
               }
               }

+ 2 - 1
compose/config/config_schema_v3.3.json

@@ -278,6 +278,7 @@
               {
               {
                 "type": "object",
                 "type": "object",
                 "required": ["type"],
                 "required": ["type"],
+                "additionalProperties": false,
                 "properties": {
                 "properties": {
                   "type": {"type": "string"},
                   "type": {"type": "string"},
                   "source": {"type": "string"},
                   "source": {"type": "string"},
@@ -412,7 +413,7 @@
               "items": {
               "items": {
                 "type": "object",
                 "type": "object",
                 "properties": {
                 "properties": {
-                  "subnet": {"type": "string"}
+                  "subnet": {"type": "string", "format": "subnet_ip_address"}
                 },
                 },
                 "additionalProperties": false
                 "additionalProperties": false
               }
               }

+ 2 - 1
compose/config/config_schema_v3.4.json

@@ -282,6 +282,7 @@
               {
               {
                 "type": "object",
                 "type": "object",
                 "required": ["type"],
                 "required": ["type"],
+                "additionalProperties": false,
                 "properties": {
                 "properties": {
                   "type": {"type": "string"},
                   "type": {"type": "string"},
                   "source": {"type": "string"},
                   "source": {"type": "string"},
@@ -420,7 +421,7 @@
               "items": {
               "items": {
                 "type": "object",
                 "type": "object",
                 "properties": {
                 "properties": {
-                  "subnet": {"type": "string"}
+                  "subnet": {"type": "string", "format": "subnet_ip_address"}
                 },
                 },
                 "additionalProperties": false
                 "additionalProperties": false
               }
               }

+ 45 - 14
compose/config/config_schema_v3.5.json

@@ -64,6 +64,7 @@
     }
     }
   },
   },
 
 
+  "patternProperties": {"^x-": {}},
   "additionalProperties": false,
   "additionalProperties": false,
 
 
   "definitions": {
   "definitions": {
@@ -154,6 +155,7 @@
         "hostname": {"type": "string"},
         "hostname": {"type": "string"},
         "image": {"type": "string"},
         "image": {"type": "string"},
         "ipc": {"type": "string"},
         "ipc": {"type": "string"},
+        "isolation": {"type": "string"},
         "labels": {"$ref": "#/definitions/list_or_dict"},
         "labels": {"$ref": "#/definitions/list_or_dict"},
         "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
         "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
 
 
@@ -299,7 +301,8 @@
                       "nocopy": {"type": "boolean"}
                       "nocopy": {"type": "boolean"}
                     }
                     }
                   }
                   }
-                }
+                },
+                "additionalProperties": false
               }
               }
             ],
             ],
             "uniqueItems": true
             "uniqueItems": true
@@ -316,7 +319,7 @@
       "additionalProperties": false,
       "additionalProperties": false,
       "properties": {
       "properties": {
         "disable": {"type": "boolean"},
         "disable": {"type": "boolean"},
-        "interval": {"type": "string"},
+        "interval": {"type": "string", "format": "duration"},
         "retries": {"type": "number"},
         "retries": {"type": "number"},
         "test": {
         "test": {
           "oneOf": [
           "oneOf": [
@@ -324,7 +327,8 @@
             {"type": "array", "items": {"type": "string"}}
             {"type": "array", "items": {"type": "string"}}
           ]
           ]
         },
         },
-        "timeout": {"type": "string"}
+        "timeout": {"type": "string", "format": "duration"},
+        "start_period": {"type": "string", "format": "duration"}
       }
       }
     },
     },
     "deployment": {
     "deployment": {
@@ -352,8 +356,23 @@
         "resources": {
         "resources": {
           "type": "object",
           "type": "object",
           "properties": {
           "properties": {
-            "limits": {"$ref": "#/definitions/resource"},
-            "reservations": {"$ref": "#/definitions/resource"}
+            "limits": {
+              "type": "object",
+              "properties": {
+                "cpus": {"type": "string"},
+                "memory": {"type": "string"}
+              },
+              "additionalProperties": false
+            },
+            "reservations": {
+              "type": "object",
+              "properties": {
+                "cpus": {"type": "string"},
+                "memory": {"type": "string"},
+                "generic_resources": {"$ref": "#/definitions/generic_resources"}
+              },
+              "additionalProperties": false
+            }
           },
           },
           "additionalProperties": false
           "additionalProperties": false
         },
         },
@@ -388,20 +407,30 @@
       "additionalProperties": false
       "additionalProperties": false
     },
     },
 
 
-    "resource": {
-      "id": "#/definitions/resource",
-      "type": "object",
-      "properties": {
-        "cpus": {"type": "string"},
-        "memory": {"type": "string"}
-      },
-      "additionalProperties": false
+    "generic_resources": {
+      "id": "#/definitions/generic_resources",
+      "type": "array",
+      "items": {
+        "type": "object",
+        "properties": {
+          "discrete_resource_spec": {
+            "type": "object",
+            "properties": {
+              "kind": {"type": "string"},
+              "value": {"type": "number"}
+            },
+            "additionalProperties": false
+          }
+        },
+        "additionalProperties": false
+      }
     },
     },
 
 
     "network": {
     "network": {
       "id": "#/definitions/network",
       "id": "#/definitions/network",
       "type": ["object", "null"],
       "type": ["object", "null"],
       "properties": {
       "properties": {
+        "name": {"type": "string"},
         "driver": {"type": "string"},
         "driver": {"type": "string"},
         "driver_opts": {
         "driver_opts": {
           "type": "object",
           "type": "object",
@@ -418,7 +447,7 @@
               "items": {
               "items": {
                 "type": "object",
                 "type": "object",
                 "properties": {
                 "properties": {
-                  "subnet": {"type": "string"}
+                  "subnet": {"type": "string", "format": "subnet_ip_address"}
                 },
                 },
                 "additionalProperties": false
                 "additionalProperties": false
               }
               }
@@ -468,6 +497,7 @@
       "id": "#/definitions/secret",
       "id": "#/definitions/secret",
       "type": "object",
       "type": "object",
       "properties": {
       "properties": {
+        "name": {"type": "string"},
         "file": {"type": "string"},
         "file": {"type": "string"},
         "external": {
         "external": {
           "type": ["boolean", "object"],
           "type": ["boolean", "object"],
@@ -484,6 +514,7 @@
       "id": "#/definitions/config",
       "id": "#/definitions/config",
       "type": "object",
       "type": "object",
       "properties": {
       "properties": {
+        "name": {"type": "string"},
         "file": {"type": "string"},
         "file": {"type": "string"},
         "external": {
         "external": {
           "type": ["boolean", "object"],
           "type": ["boolean", "object"],

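The `"patternProperties": {"^x-": {}}` entry added at the top level of the 3.5 schema allows extension keys starting with `x-` (handy for YAML anchors) alongside the known sections, while `additionalProperties: false` still rejects anything else. A minimal stand-alone illustration of that jsonschema behavior, not the full compose schema:

    from jsonschema import Draft4Validator

    top_level = {
        "type": "object",
        "properties": {"version": {"type": "string"}, "services": {"type": "object"}},
        "patternProperties": {"^x-": {}},
        "additionalProperties": False,
    }

    validator = Draft4Validator(top_level)
    print(validator.is_valid({"version": "3.5", "x-defaults": {"restart": "always"}}))  # True
    print(validator.is_valid({"version": "3.5", "defaults": {}}))  # False: not an x- key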
+ 1 - 1
compose/config/environment.py

@@ -32,7 +32,7 @@ def env_vars_from_file(filename):
     elif not os.path.isfile(filename):
         raise ConfigurationError("%s is not a file." % (filename))
     env = {}
-    with contextlib.closing(codecs.open(filename, 'r', 'utf-8')) as fileobj:
+    with contextlib.closing(codecs.open(filename, 'r', 'utf-8-sig')) as fileobj:
         for line in fileobj:
             line = line.strip()
             if line and not line.startswith('#'):

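Switching the codec to `utf-8-sig` is what fixes the changelog's BOM bug: an env file saved with a leading byte-order mark would otherwise produce a first key of `\ufeffFOO`. A quick stdlib-only check of the difference:

    import codecs
    import contextlib
    import os
    import tempfile

    # Write an env file that starts with a UTF-8 BOM, as some editors do.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.env') as tmp:
        tmp.write(b'\xef\xbb\xbfFOO=bar\n')

    # 'utf-8-sig' strips the BOM, so the key reads back as plain FOO.
    with contextlib.closing(codecs.open(tmp.name, 'r', 'utf-8-sig')) as fileobj:
        print(fileobj.readline().strip())   # FOO=bar

    os.remove(tmp.name)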
+ 92 - 6
compose/config/interpolation.py

@@ -2,6 +2,7 @@ from __future__ import absolute_import
 from __future__ import unicode_literals
 from __future__ import unicode_literals
 
 
 import logging
 import logging
+import re
 from string import Template
 from string import Template
 
 
 import six
 import six
@@ -44,9 +45,13 @@ def interpolate_environment_variables(version, config, section, environment):
     )
     )
 
 
 
 
+def get_config_path(config_key, section, name):
+    return '{}.{}.{}'.format(section, name, config_key)
+
+
 def interpolate_value(name, config_key, value, section, interpolator):
 def interpolate_value(name, config_key, value, section, interpolator):
     try:
     try:
-        return recursive_interpolate(value, interpolator)
+        return recursive_interpolate(value, interpolator, get_config_path(config_key, section, name))
     except InvalidInterpolation as e:
     except InvalidInterpolation as e:
         raise ConfigurationError(
         raise ConfigurationError(
             'Invalid interpolation format for "{config_key}" option '
             'Invalid interpolation format for "{config_key}" option '
@@ -57,21 +62,24 @@ def interpolate_value(name, config_key, value, section, interpolator):
                 string=e.string))
                 string=e.string))
 
 
 
 
-def recursive_interpolate(obj, interpolator):
+def recursive_interpolate(obj, interpolator, config_path):
+    def append(config_path, key):
+        return '{}.{}'.format(config_path, key)
+
     if isinstance(obj, six.string_types):
     if isinstance(obj, six.string_types):
-        return interpolator.interpolate(obj)
+        return converter.convert(config_path, interpolator.interpolate(obj))
     if isinstance(obj, dict):
     if isinstance(obj, dict):
         return dict(
         return dict(
-            (key, recursive_interpolate(val, interpolator))
+            (key, recursive_interpolate(val, interpolator, append(config_path, key)))
             for (key, val) in obj.items()
             for (key, val) in obj.items()
         )
         )
     if isinstance(obj, list):
     if isinstance(obj, list):
-        return [recursive_interpolate(val, interpolator) for val in obj]
+        return [recursive_interpolate(val, interpolator, config_path) for val in obj]
     return obj
     return obj
 
 
 
 
 class TemplateWithDefaults(Template):
 class TemplateWithDefaults(Template):
-    idpattern = r'[_a-z][_a-z0-9]*(?::?-[^}]+)?'
+    idpattern = r'[_a-z][_a-z0-9]*(?::?-[^}]*)?'
 
 
     # Modified from python2.7/string.py
     # Modified from python2.7/string.py
     def substitute(self, mapping):
     def substitute(self, mapping):
@@ -100,3 +108,81 @@ class TemplateWithDefaults(Template):
 class InvalidInterpolation(Exception):
 class InvalidInterpolation(Exception):
     def __init__(self, string):
     def __init__(self, string):
         self.string = string
         self.string = string
+
+
+PATH_JOKER = '[^.]+'
+
+
+def re_path(*args):
+    return re.compile('^{}$'.format('.'.join(args)))
+
+
+def re_path_basic(section, name):
+    return re_path(section, PATH_JOKER, name)
+
+
+def service_path(*args):
+    return re_path('service', PATH_JOKER, *args)
+
+
+def to_boolean(s):
+    s = s.lower()
+    if s in ['y', 'yes', 'true', 'on']:
+        return True
+    elif s in ['n', 'no', 'false', 'off']:
+        return False
+    raise ValueError('"{}" is not a valid boolean value'.format(s))
+
+
+def to_int(s):
+    # We must be able to handle octal representation for `mode` values notably
+    if six.PY3 and re.match('^0[0-9]+$', s.strip()):
+        s = '0o' + s[1:]
+    return int(s, base=0)
+
+
+class ConversionMap(object):
+    map = {
+        service_path('blkio_config', 'weight'): to_int,
+        service_path('blkio_config', 'weight_device', 'weight'): to_int,
+        service_path('cpus'): float,
+        service_path('cpu_count'): to_int,
+        service_path('configs', 'mode'): to_int,
+        service_path('secrets', 'mode'): to_int,
+        service_path('healthcheck', 'retries'): to_int,
+        service_path('healthcheck', 'disable'): to_boolean,
+        service_path('deploy', 'replicas'): to_int,
+        service_path('deploy', 'update_config', 'parallelism'): to_int,
+        service_path('deploy', 'update_config', 'max_failure_ratio'): float,
+        service_path('deploy', 'restart_policy', 'max_attempts'): to_int,
+        service_path('mem_swappiness'): to_int,
+        service_path('oom_kill_disable'): to_boolean,
+        service_path('oom_score_adj'): to_int,
+        service_path('ports', 'target'): to_int,
+        service_path('ports', 'published'): to_int,
+        service_path('scale'): to_int,
+        service_path('ulimits', PATH_JOKER): to_int,
+        service_path('ulimits', PATH_JOKER, 'soft'): to_int,
+        service_path('ulimits', PATH_JOKER, 'hard'): to_int,
+        service_path('privileged'): to_boolean,
+        service_path('read_only'): to_boolean,
+        service_path('stdin_open'): to_boolean,
+        service_path('tty'): to_boolean,
+        service_path('volumes', 'read_only'): to_boolean,
+        service_path('volumes', 'volume', 'nocopy'): to_boolean,
+        re_path_basic('network', 'attachable'): to_boolean,
+        re_path_basic('network', 'external'): to_boolean,
+        re_path_basic('network', 'internal'): to_boolean,
+        re_path_basic('volume', 'external'): to_boolean,
+        re_path_basic('secret', 'external'): to_boolean,
+        re_path_basic('config', 'external'): to_boolean,
+    }
+
+    def convert(self, path, value):
+        for rexp in self.map.keys():
+            if rexp.match(path):
+                return self.map[rexp](value)
+        return value
+
+
+converter = ConversionMap()

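The converters registered in `ConversionMap` are what implement the changelog's "values interpolated from the environment are converted to the proper type" item. Standalone copies of the two helpers above (this version drops the `six.PY3` guard, so it is Python 3 only), showing the octal handling used for secret/config `mode` values:

    import re

    def to_boolean(s):
        s = s.lower()
        if s in ('y', 'yes', 'true', 'on'):
            return True
        if s in ('n', 'no', 'false', 'off'):
            return False
        raise ValueError('"{}" is not a valid boolean value'.format(s))

    def to_int(s):
        # A leading zero means octal, which matters for file modes like "0600".
        if re.match('^0[0-9]+$', s.strip()):
            s = '0o' + s[1:]
        return int(s, base=0)

    print(to_boolean('Yes'), to_int('0600'), to_int('42'))   # True 384 42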
+ 15 - 1
compose/config/serialize.py

@@ -7,9 +7,11 @@ import yaml
 from compose.config import types
 from compose.config import types
 from compose.const import COMPOSEFILE_V1 as V1
 from compose.const import COMPOSEFILE_V1 as V1
 from compose.const import COMPOSEFILE_V2_1 as V2_1
 from compose.const import COMPOSEFILE_V2_1 as V2_1
+from compose.const import COMPOSEFILE_V2_3 as V2_3
 from compose.const import COMPOSEFILE_V3_0 as V3_0
 from compose.const import COMPOSEFILE_V3_0 as V3_0
 from compose.const import COMPOSEFILE_V3_2 as V3_2
 from compose.const import COMPOSEFILE_V3_2 as V3_2
 from compose.const import COMPOSEFILE_V3_4 as V3_4
 from compose.const import COMPOSEFILE_V3_4 as V3_4
+from compose.const import COMPOSEFILE_V3_5 as V3_5
 
 
 
 
 def serialize_config_type(dumper, data):
 def serialize_config_type(dumper, data):
@@ -34,6 +36,7 @@ def serialize_string(dumper, data):
     return representer(data)
     return representer(data)
 
 
 
 
+yaml.SafeDumper.add_representer(types.MountSpec, serialize_dict_type)
 yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
 yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
 yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
 yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
 yaml.SafeDumper.add_representer(types.ServiceSecret, serialize_dict_type)
 yaml.SafeDumper.add_representer(types.ServiceSecret, serialize_dict_type)
@@ -67,7 +70,8 @@ def denormalize_config(config, image_digests=None):
                 del conf['external_name']
                 del conf['external_name']
 
 
             if 'name' in conf:
             if 'name' in conf:
-                if config.version < V2_1 or (config.version >= V3_0 and config.version < V3_4):
+                if config.version < V2_1 or (
+                        config.version >= V3_0 and config.version < v3_introduced_name_key(key)):
                     del conf['name']
                     del conf['name']
                 elif 'external' in conf:
                 elif 'external' in conf:
                     conf['external'] = True
                     conf['external'] = True
@@ -75,6 +79,12 @@ def denormalize_config(config, image_digests=None):
     return result
     return result
 
 
 
 
+def v3_introduced_name_key(key):
+    if key == 'volumes':
+        return V3_4
+    return V3_5
+
+
 def serialize_config(config, image_digests=None):
 def serialize_config(config, image_digests=None):
     return yaml.safe_dump(
     return yaml.safe_dump(
         denormalize_config(config, image_digests),
         denormalize_config(config, image_digests),
@@ -141,5 +151,9 @@ def denormalize_service_dict(service_dict, version, image_digest=None):
             p.legacy_repr() if isinstance(p, types.ServicePort) else p
             p.legacy_repr() if isinstance(p, types.ServicePort) else p
             for p in service_dict['ports']
             for p in service_dict['ports']
         ]
         ]
+    if 'volumes' in service_dict and (version < V2_3 or (version > V3_0 and version < V3_2)):
+        service_dict['volumes'] = [
+            v.legacy_repr() if isinstance(v, types.MountSpec) else v for v in service_dict['volumes']
+        ]
 
 
     return service_dict
     return service_dict

+ 71 - 7
compose/config/types.py

@@ -133,6 +133,61 @@ def normalize_path_for_engine(path):
     return path.replace('\\', '/')
     return path.replace('\\', '/')
 
 
 
 
+class MountSpec(object):
+    options_map = {
+        'volume': {
+            'nocopy': 'no_copy'
+        },
+        'bind': {
+            'propagation': 'propagation'
+        }
+    }
+    _fields = ['type', 'source', 'target', 'read_only', 'consistency']
+
+    @classmethod
+    def parse(cls, mount_dict, normalize=False):
+        if mount_dict.get('source'):
+            mount_dict['source'] = os.path.normpath(mount_dict['source'])
+            if normalize:
+                mount_dict['source'] = normalize_path_for_engine(mount_dict['source'])
+
+        return cls(**mount_dict)
+
+    def __init__(self, type, source=None, target=None, read_only=None, consistency=None, **kwargs):
+        self.type = type
+        self.source = source
+        self.target = target
+        self.read_only = read_only
+        self.consistency = consistency
+        self.options = None
+        if self.type in kwargs:
+            self.options = kwargs[self.type]
+
+    def as_volume_spec(self):
+        mode = 'ro' if self.read_only else 'rw'
+        return VolumeSpec(external=self.source, internal=self.target, mode=mode)
+
+    def legacy_repr(self):
+        return self.as_volume_spec().repr()
+
+    def repr(self):
+        res = {}
+        for field in self._fields:
+            if getattr(self, field, None):
+                res[field] = getattr(self, field)
+        if self.options:
+            res[self.type] = self.options
+        return res
+
+    @property
+    def is_named_volume(self):
+        return self.type == 'volume' and self.source
+
+    @property
+    def external(self):
+        return self.source
+
+
 class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
 class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
 
 
     @classmethod
     @classmethod
@@ -238,17 +293,18 @@ class ServiceLink(namedtuple('_ServiceLink', 'target alias')):
         return self.alias
         return self.alias
 
 
 
 
-class ServiceConfigBase(namedtuple('_ServiceConfigBase', 'source target uid gid mode')):
+class ServiceConfigBase(namedtuple('_ServiceConfigBase', 'source target uid gid mode name')):
     @classmethod
     @classmethod
     def parse(cls, spec):
     def parse(cls, spec):
         if isinstance(spec, six.string_types):
         if isinstance(spec, six.string_types):
-            return cls(spec, None, None, None, None)
+            return cls(spec, None, None, None, None, None)
         return cls(
         return cls(
             spec.get('source'),
             spec.get('source'),
             spec.get('target'),
             spec.get('target'),
             spec.get('uid'),
             spec.get('uid'),
             spec.get('gid'),
             spec.get('gid'),
             spec.get('mode'),
             spec.get('mode'),
+            spec.get('name')
         )
         )
 
 
     @property
     @property
@@ -277,11 +333,19 @@ class ServicePort(namedtuple('_ServicePort', 'target published protocol mode ext
         except ValueError:
         except ValueError:
             raise ConfigurationError('Invalid target port: {}'.format(target))
             raise ConfigurationError('Invalid target port: {}'.format(target))
 
 
-        try:
-            if published:
-                published = int(published)
-        except ValueError:
-            raise ConfigurationError('Invalid published port: {}'.format(published))
+        if published:
+            if isinstance(published, six.string_types) and '-' in published:  # "x-y:z" format
+                a, b = published.split('-', 1)
+                try:
+                    int(a)
+                    int(b)
+                except ValueError:
+                    raise ConfigurationError('Invalid published port: {}'.format(published))
+            else:
+                try:
+                    published = int(published)
+                except ValueError:
+                    raise ConfigurationError('Invalid published port: {}'.format(published))
 
 
         return super(ServicePort, cls).__new__(
         return super(ServicePort, cls).__new__(
             cls, target, published, *args, **kwargs
             cls, target, published, *args, **kwargs
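
Editor's note: a usage sketch of the new MountSpec long-syntax parsing, assuming this tree is on PYTHONPATH. Field names and behaviour are taken from the class above; the exact legacy string comes from the pre-existing VolumeSpec.repr(), so it is shown as an example only.

```
# Hedged sketch: parse a long-syntax volume entry and inspect the resulting MountSpec.
from compose.config.types import MountSpec

mount = MountSpec.parse({
    'type': 'volume',
    'source': 'foobar',
    'target': '/container/volumepath2',
    'volume': {'nocopy': True},
})

print(mount.is_named_volume)   # True - a 'volume' mount with a source
print(mount.repr())            # long-syntax dict, including the {'nocopy': True} options
print(mount.legacy_repr())     # short-syntax fallback, e.g. 'foobar:/container/volumepath2:rw'
```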

+ 60 - 1
compose/config/validation.py

@@ -44,6 +44,31 @@ DOCKER_CONFIG_HINTS = {
 VALID_NAME_CHARS = '[a-zA-Z0-9\._\-]'
 VALID_NAME_CHARS = '[a-zA-Z0-9\._\-]'
 VALID_EXPOSE_FORMAT = r'^\d+(\-\d+)?(\/[a-zA-Z]+)?$'
 VALID_EXPOSE_FORMAT = r'^\d+(\-\d+)?(\/[a-zA-Z]+)?$'
 
 
+VALID_IPV4_SEG = r'(\d{1,2}|1\d{2}|2[0-4]\d|25[0-5])'
+VALID_IPV4_ADDR = "({IPV4_SEG}\.){{3}}{IPV4_SEG}".format(IPV4_SEG=VALID_IPV4_SEG)
+VALID_REGEX_IPV4_CIDR = "^{IPV4_ADDR}/(\d|[1-2]\d|3[0-2])$".format(IPV4_ADDR=VALID_IPV4_ADDR)
+
+VALID_IPV6_SEG = r'[0-9a-fA-F]{1,4}'
+VALID_REGEX_IPV6_CIDR = "".join("""
+^
+(
+    (({IPV6_SEG}:){{7}}{IPV6_SEG})|
+    (({IPV6_SEG}:){{1,7}}:)|
+    (({IPV6_SEG}:){{1,6}}(:{IPV6_SEG}){{1,1}})|
+    (({IPV6_SEG}:){{1,5}}(:{IPV6_SEG}){{1,2}})|
+    (({IPV6_SEG}:){{1,4}}(:{IPV6_SEG}){{1,3}})|
+    (({IPV6_SEG}:){{1,3}}(:{IPV6_SEG}){{1,4}})|
+    (({IPV6_SEG}:){{1,2}}(:{IPV6_SEG}){{1,5}})|
+    (({IPV6_SEG}:){{1,1}}(:{IPV6_SEG}){{1,6}})|
+    (:((:{IPV6_SEG}){{1,7}}|:))|
+    (fe80:(:{IPV6_SEG}){{0,4}}%[0-9a-zA-Z]{{1,}})|
+    (::(ffff(:0{{1,4}}){{0,1}}:){{0,1}}{IPV4_ADDR})|
+    (({IPV6_SEG}:){{1,4}}:{IPV4_ADDR})
+)
+/(\d|[1-9]\d|1[0-1]\d|12[0-8])
+$
+""".format(IPV6_SEG=VALID_IPV6_SEG, IPV4_ADDR=VALID_IPV4_ADDR).split())
+
 
 
 @FormatChecker.cls_checks(format="ports", raises=ValidationError)
 @FormatChecker.cls_checks(format="ports", raises=ValidationError)
 def format_ports(instance):
 def format_ports(instance):
@@ -64,6 +89,16 @@ def format_expose(instance):
     return True
     return True
 
 
 
 
+@FormatChecker.cls_checks("subnet_ip_address", raises=ValidationError)
+def format_subnet_ip_address(instance):
+    if isinstance(instance, six.string_types):
+        if not re.match(VALID_REGEX_IPV4_CIDR, instance) and \
+                not re.match(VALID_REGEX_IPV6_CIDR, instance):
+            raise ValidationError("should use the CIDR format")
+
+    return True
+
+
 def match_named_volumes(service_dict, project_volumes):
 def match_named_volumes(service_dict, project_volumes):
     service_volumes = service_dict.get('volumes', [])
     service_volumes = service_dict.get('volumes', [])
     for volume_spec in service_volumes:
     for volume_spec in service_volumes:
@@ -391,7 +426,7 @@ def process_config_schema_errors(error):
 
 
 def validate_against_config_schema(config_file):
 def validate_against_config_schema(config_file):
     schema = load_jsonschema(config_file)
     schema = load_jsonschema(config_file)
-    format_checker = FormatChecker(["ports", "expose"])
+    format_checker = FormatChecker(["ports", "expose", "subnet_ip_address"])
     validator = Draft4Validator(
     validator = Draft4Validator(
         schema,
         schema,
         resolver=RefResolver(get_resolver_path(), schema),
         resolver=RefResolver(get_resolver_path(), schema),
@@ -465,3 +500,27 @@ def handle_errors(errors, format_error_func, filename):
         "The Compose file{file_msg} is invalid because:\n{error_msg}".format(
         "The Compose file{file_msg} is invalid because:\n{error_msg}".format(
             file_msg=" '{}'".format(filename) if filename else "",
             file_msg=" '{}'".format(filename) if filename else "",
             error_msg=error_msg))
             error_msg=error_msg))
+
+
+def validate_healthcheck(service_config):
+    healthcheck = service_config.config.get('healthcheck', {})
+
+    if 'test' in healthcheck and isinstance(healthcheck['test'], list):
+        if len(healthcheck['test']) == 0:
+            raise ConfigurationError(
+                'Service "{}" defines an invalid healthcheck: '
+                '"test" is an empty list'
+                .format(service_config.name))
+
+        # when disable is true config.py::process_healthcheck adds "test: ['NONE']" to service_config
+        elif healthcheck['test'][0] == 'NONE' and len(healthcheck) > 1:
+            raise ConfigurationError(
+                'Service "{}" defines an invalid healthcheck: '
+                '"disable: true" cannot be combined with other options'
+                .format(service_config.name))
+
+        elif healthcheck['test'][0] not in ('NONE', 'CMD', 'CMD-SHELL'):
+            raise ConfigurationError(
+                'Service "{}" defines an invalid healthcheck: '
+                'when "test" is a list the first item must be either NONE, CMD or CMD-SHELL'
+                .format(service_config.name))
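
Editor's note: the new "subnet_ip_address" format checker can be exercised in isolation; the IPv4 pattern below is copied from the constants added above.

```
# Sketch: the IPv4 CIDR pattern used by format_subnet_ip_address (copied from above).
import re

VALID_IPV4_SEG = r'(\d{1,2}|1\d{2}|2[0-4]\d|25[0-5])'
VALID_IPV4_ADDR = r"({IPV4_SEG}\.){{3}}{IPV4_SEG}".format(IPV4_SEG=VALID_IPV4_SEG)
VALID_REGEX_IPV4_CIDR = r"^{IPV4_ADDR}/(\d|[1-2]\d|3[0-2])$".format(IPV4_ADDR=VALID_IPV4_ADDR)

print(bool(re.match(VALID_REGEX_IPV4_CIDR, '172.28.0.0/16')))  # True - valid CIDR
print(bool(re.match(VALID_REGEX_IPV4_CIDR, '172.28.0.0')))     # False - prefix length missing
```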

+ 14 - 11
compose/network.py

@@ -25,21 +25,22 @@ OPTS_EXCEPTIONS = [
 
 
 class Network(object):
 class Network(object):
     def __init__(self, client, project, name, driver=None, driver_opts=None,
     def __init__(self, client, project, name, driver=None, driver_opts=None,
-                 ipam=None, external_name=None, internal=False, enable_ipv6=False,
-                 labels=None):
+                 ipam=None, external=False, internal=False, enable_ipv6=False,
+                 labels=None, custom_name=False):
         self.client = client
         self.client = client
         self.project = project
         self.project = project
         self.name = name
         self.name = name
         self.driver = driver
         self.driver = driver
         self.driver_opts = driver_opts
         self.driver_opts = driver_opts
         self.ipam = create_ipam_config_from_dict(ipam)
         self.ipam = create_ipam_config_from_dict(ipam)
-        self.external_name = external_name
+        self.external = external
         self.internal = internal
         self.internal = internal
         self.enable_ipv6 = enable_ipv6
         self.enable_ipv6 = enable_ipv6
         self.labels = labels
         self.labels = labels
+        self.custom_name = custom_name
 
 
     def ensure(self):
     def ensure(self):
-        if self.external_name:
+        if self.external:
             try:
             try:
                 self.inspect()
                 self.inspect()
                 log.debug(
                 log.debug(
@@ -51,7 +52,7 @@ class Network(object):
                     'Network {name} declared as external, but could'
                     'Network {name} declared as external, but could'
                     ' not be found. Please create the network manually'
                     ' not be found. Please create the network manually'
                     ' using `{command} {name}` and try again.'.format(
                     ' using `{command} {name}` and try again.'.format(
-                        name=self.external_name,
+                        name=self.full_name,
                         command='docker network create'
                         command='docker network create'
                     )
                     )
                 )
                 )
@@ -83,7 +84,7 @@ class Network(object):
             )
             )
 
 
     def remove(self):
     def remove(self):
-        if self.external_name:
+        if self.external:
             log.info("Network %s is external, skipping", self.full_name)
             log.info("Network %s is external, skipping", self.full_name)
             return
             return
 
 
@@ -95,8 +96,8 @@ class Network(object):
 
 
     @property
     @property
     def full_name(self):
     def full_name(self):
-        if self.external_name:
-            return self.external_name
+        if self.custom_name:
+            return self.name
         return '{0}_{1}'.format(self.project, self.name)
         return '{0}_{1}'.format(self.project, self.name)
 
 
     @property
     @property
@@ -116,7 +117,7 @@ def create_ipam_config_from_dict(ipam_dict):
         return None
         return None
 
 
     return IPAMConfig(
     return IPAMConfig(
-        driver=ipam_dict.get('driver'),
+        driver=ipam_dict.get('driver') or 'default',
         pool_configs=[
         pool_configs=[
             IPAMPool(
             IPAMPool(
                 subnet=config.get('subnet'),
                 subnet=config.get('subnet'),
@@ -203,14 +204,16 @@ def build_networks(name, config_data, client):
     network_config = config_data.networks or {}
     network_config = config_data.networks or {}
     networks = {
     networks = {
         network_name: Network(
         network_name: Network(
-            client=client, project=name, name=network_name,
+            client=client, project=name,
+            name=data.get('name', network_name),
             driver=data.get('driver'),
             driver=data.get('driver'),
             driver_opts=data.get('driver_opts'),
             driver_opts=data.get('driver_opts'),
             ipam=data.get('ipam'),
             ipam=data.get('ipam'),
-            external_name=data.get('external_name'),
+            external=bool(data.get('external', False)),
             internal=data.get('internal'),
             internal=data.get('internal'),
             enable_ipv6=data.get('enable_ipv6'),
             enable_ipv6=data.get('enable_ipv6'),
             labels=data.get('labels'),
             labels=data.get('labels'),
+            custom_name=data.get('name') is not None,
         )
         )
         for network_name, data in network_config.items()
         for network_name, data in network_config.items()
     }
     }
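
Editor's note: the practical effect of the new name/custom_name handling, as a standalone sketch of Network.full_name above (the fixture external-networks-v3-5.yml later in this diff relies on it).

```
# Standalone sketch of the naming rule: an explicit `name:` is used verbatim,
# everything else keeps the classic <project>_<network> prefix.
def full_name(project, name, custom_name=False):
    return name if custom_name else '{0}_{1}'.format(project, name)

print(full_name('myproj', 'front'))                       # myproj_front
print(full_name('myproj', 'some_foo', custom_name=True))  # some_foo
```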

+ 16 - 7
compose/parallel.py

@@ -26,7 +26,7 @@ log = logging.getLogger(__name__)
 STOP = object()
 STOP = object()
 
 
 
 
-def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
+def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, parent_objects=None):
     """Runs func on objects in parallel while ensuring that func is
     """Runs func on objects in parallel while ensuring that func is
     ran on object only after it is ran on all its dependencies.
     run on an object only after it has been run on all its dependencies.
     run on an object only after it has been run on all its dependencies.
 
@@ -37,9 +37,19 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
     stream = get_output_stream(sys.stderr)
     stream = get_output_stream(sys.stderr)
 
 
     writer = ParallelStreamWriter(stream, msg)
     writer = ParallelStreamWriter(stream, msg)
-    for obj in objects:
+
+    if parent_objects:
+        display_objects = list(parent_objects)
+    else:
+        display_objects = objects
+
+    for obj in display_objects:
         writer.add_object(get_name(obj))
         writer.add_object(get_name(obj))
-    writer.write_initial()
+
+    # write data in a second loop to consider all objects for width alignment
+    # and avoid duplicates when parent_objects exists
+    for obj in objects:
+        writer.write_initial(get_name(obj))
 
 
     events = parallel_execute_iter(objects, func, get_deps, limit)
     events = parallel_execute_iter(objects, func, get_deps, limit)
 
 
@@ -237,12 +247,11 @@ class ParallelStreamWriter(object):
         self.lines.append(obj_index)
         self.lines.append(obj_index)
         self.width = max(self.width, len(obj_index))
         self.width = max(self.width, len(obj_index))
 
 
-    def write_initial(self):
+    def write_initial(self, obj_index):
         if self.msg is None:
         if self.msg is None:
             return
             return
-        for line in self.lines:
-            self.stream.write("{} {:<{width}} ... \r\n".format(self.msg, line,
-                              width=self.width))
+        self.stream.write("{} {:<{width}} ... \r\n".format(
+            self.msg, self.lines[self.lines.index(obj_index)], width=self.width))
         self.stream.flush()
         self.stream.flush()
 
 
     def _write_ansi(self, obj_index, status):
     def _write_ansi(self, obj_index, status):

+ 30 - 6
compose/project.py

@@ -29,6 +29,7 @@ from .service import ConvergenceStrategy
 from .service import NetworkMode
 from .service import NetworkMode
 from .service import PidMode
 from .service import PidMode
 from .service import Service
 from .service import Service
+from .service import ServiceName
 from .service import ServiceNetworkMode
 from .service import ServiceNetworkMode
 from .service import ServicePidMode
 from .service import ServicePidMode
 from .utils import microseconds_from_time_nano
 from .utils import microseconds_from_time_nano
@@ -190,6 +191,25 @@ class Project(object):
             service.remove_duplicate_containers()
             service.remove_duplicate_containers()
         return services
         return services
 
 
+    def get_scaled_services(self, services, scale_override):
+        """
+        Returns a list of this project's services as scaled ServiceName objects.
+
+        services: a list of Service objects
+        scale_override: a dict with the scale to apply to each service (k: service_name, v: scale)
+        """
+        service_names = []
+        for service in services:
+            if service.name in scale_override:
+                scale = scale_override[service.name]
+            else:
+                scale = service.scale_num
+
+            for i in range(1, scale + 1):
+                service_names.append(ServiceName(self.name, service.name, i))
+
+        return service_names
+
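
Editor's note: a standalone sketch of the scale-resolution rule implemented by get_scaled_services above, using a namedtuple in place of compose.service.ServiceName.

```
# Sketch: one ServiceName per replica, with per-service overrides taking precedence.
from collections import namedtuple

ServiceName = namedtuple('ServiceName', 'project service number')  # stand-in for the real type

def scaled_names(project, default_scales, scale_override):
    names = []
    for service, default in default_scales.items():
        scale = scale_override.get(service, default)
        names.extend(ServiceName(project, service, i) for i in range(1, scale + 1))
    return names

print(scaled_names('proj', {'db': 1, 'worker': 2}, {'worker': 3}))
# -> db gets a single name, worker gets numbers 1..3
```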
     def get_links(self, service_dict):
     def get_links(self, service_dict):
         links = []
         links = []
         if 'links' in service_dict:
         if 'links' in service_dict:
@@ -310,8 +330,8 @@ class Project(object):
             service_names, stopped=True, one_off=one_off
             service_names, stopped=True, one_off=one_off
         ), options)
         ), options)
 
 
-    def down(self, remove_image_type, include_volumes, remove_orphans=False):
-        self.stop(one_off=OneOffFilter.include)
+    def down(self, remove_image_type, include_volumes, remove_orphans=False, timeout=None):
+        self.stop(one_off=OneOffFilter.include, timeout=timeout)
         self.find_orphan_containers(remove_orphans)
         self.find_orphan_containers(remove_orphans)
         self.remove_stopped(v=include_volumes, one_off=OneOffFilter.include)
         self.remove_stopped(v=include_volumes, one_off=OneOffFilter.include)
 
 
@@ -337,10 +357,11 @@ class Project(object):
         )
         )
         return containers
         return containers
 
 
-    def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, build_args=None):
+    def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, memory=None,
+              build_args=None):
         for service in self.get_services(service_names):
         for service in self.get_services(service_names):
             if service.can_be_built():
             if service.can_be_built():
-                service.build(no_cache, pull, force_rm, build_args)
+                service.build(no_cache, pull, force_rm, memory, build_args)
             else:
             else:
                 log.info('%s uses an image, skipping' % service.name)
                 log.info('%s uses an image, skipping' % service.name)
 
 
@@ -430,15 +451,18 @@ class Project(object):
         for svc in services:
         for svc in services:
             svc.ensure_image_exists(do_build=do_build)
             svc.ensure_image_exists(do_build=do_build)
         plans = self._get_convergence_plans(services, strategy)
         plans = self._get_convergence_plans(services, strategy)
+        scaled_services = self.get_scaled_services(services, scale_override)
 
 
         def do(service):
         def do(service):
+
             return service.execute_convergence_plan(
             return service.execute_convergence_plan(
                 plans[service.name],
                 plans[service.name],
                 timeout=timeout,
                 timeout=timeout,
                 detached=detached,
                 detached=detached,
                 scale_override=scale_override.get(service.name),
                 scale_override=scale_override.get(service.name),
                 rescale=rescale,
                 rescale=rescale,
-                start=start
+                start=start,
+                project_services=scaled_services
             )
             )
 
 
         def get_deps(service):
         def get_deps(service):
@@ -624,7 +648,7 @@ def get_secrets(service, service_secrets, secret_defs):
                 "Service \"{service}\" uses an undefined secret \"{secret}\" "
                 "Service \"{service}\" uses an undefined secret \"{secret}\" "
                 .format(service=service, secret=secret.source))
                 .format(service=service, secret=secret.source))
 
 
-        if secret_def.get('external_name'):
+        if secret_def.get('external'):
             log.warn("Service \"{service}\" uses secret \"{secret}\" which is external. "
             log.warn("Service \"{service}\" uses secret \"{secret}\" which is external. "
                      "External secrets are not available to containers created by "
                      "External secrets are not available to containers created by "
                      "docker-compose.".format(service=service, secret=secret.source))
                      "docker-compose.".format(service=service, secret=secret.source))

+ 102 - 36
compose/service.py

@@ -14,6 +14,9 @@ from docker.errors import APIError
 from docker.errors import ImageNotFound
 from docker.errors import ImageNotFound
 from docker.errors import NotFound
 from docker.errors import NotFound
 from docker.types import LogConfig
 from docker.types import LogConfig
+from docker.types import Mount
+from docker.utils import version_gte
+from docker.utils import version_lt
 from docker.utils.ports import build_port_bindings
 from docker.utils.ports import build_port_bindings
 from docker.utils.ports import split_port
 from docker.utils.ports import split_port
 from docker.utils.utils import convert_tmpfs_mounts
 from docker.utils.utils import convert_tmpfs_mounts
@@ -23,7 +26,9 @@ from . import const
 from . import progress_stream
 from . import progress_stream
 from .config import DOCKER_CONFIG_KEYS
 from .config import DOCKER_CONFIG_KEYS
 from .config import merge_environment
 from .config import merge_environment
+from .config import merge_labels
 from .config.errors import DependencyError
 from .config.errors import DependencyError
+from .config.types import MountSpec
 from .config.types import ServicePort
 from .config.types import ServicePort
 from .config.types import VolumeSpec
 from .config.types import VolumeSpec
 from .const import DEFAULT_TIMEOUT
 from .const import DEFAULT_TIMEOUT
@@ -76,6 +81,7 @@ HOST_CONFIG_KEYS = [
     'mem_reservation',
     'mem_reservation',
     'memswap_limit',
     'memswap_limit',
     'mem_swappiness',
     'mem_swappiness',
+    'oom_kill_disable',
     'oom_score_adj',
     'oom_score_adj',
     'pid',
     'pid',
     'pids_limit',
     'pids_limit',
@@ -378,11 +384,11 @@ class Service(object):
 
 
         return has_diverged
         return has_diverged
 
 
-    def _execute_convergence_create(self, scale, detached, start):
+    def _execute_convergence_create(self, scale, detached, start, project_services=None):
             i = self._next_container_number()
             i = self._next_container_number()
 
 
             def create_and_start(service, n):
             def create_and_start(service, n):
-                container = service.create_container(number=n)
+                container = service.create_container(number=n, quiet=True)
                 if not detached:
                 if not detached:
                     container.attach_log_stream()
                     container.attach_log_stream()
                 if start:
                 if start:
@@ -390,10 +396,11 @@ class Service(object):
                 return container
                 return container
 
 
             containers, errors = parallel_execute(
             containers, errors = parallel_execute(
-                range(i, i + scale),
-                lambda n: create_and_start(self, n),
-                lambda n: self.get_container_name(n),
+                [ServiceName(self.project, self.name, index) for index in range(i, i + scale)],
+                lambda service_name: create_and_start(self, service_name.number),
+                lambda service_name: self.get_container_name(service_name.service, service_name.number),
                 "Creating",
                 "Creating",
+                parent_objects=project_services
             )
             )
             for error in errors.values():
             for error in errors.values():
                 raise OperationFailedError(error)
                 raise OperationFailedError(error)
@@ -432,7 +439,7 @@ class Service(object):
             if start:
             if start:
                 _, errors = parallel_execute(
                 _, errors = parallel_execute(
                     containers,
                     containers,
-                    lambda c: self.start_container_if_stopped(c, attach_logs=not detached),
+                    lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
                     lambda c: c.name,
                     lambda c: c.name,
                     "Starting",
                     "Starting",
                 )
                 )
@@ -459,7 +466,7 @@ class Service(object):
         )
         )
 
 
     def execute_convergence_plan(self, plan, timeout=None, detached=False,
     def execute_convergence_plan(self, plan, timeout=None, detached=False,
-                                 start=True, scale_override=None, rescale=True):
+                                 start=True, scale_override=None, rescale=True, project_services=None):
         (action, containers) = plan
         (action, containers) = plan
         scale = scale_override if scale_override is not None else self.scale_num
         scale = scale_override if scale_override is not None else self.scale_num
         containers = sorted(containers, key=attrgetter('number'))
         containers = sorted(containers, key=attrgetter('number'))
@@ -468,7 +475,7 @@ class Service(object):
 
 
         if action == 'create':
         if action == 'create':
             return self._execute_convergence_create(
             return self._execute_convergence_create(
-                scale, detached, start
+                scale, detached, start, project_services
             )
             )
 
 
         # The create action always needs an initial scale, but otherwise,
         # The create action always needs an initial scale, but otherwise,
@@ -510,7 +517,6 @@ class Service(object):
         volumes can be copied to the new container, before the original
         volumes can be copied to the new container, before the original
         container is removed.
         container is removed.
         """
         """
-        log.info("Recreating %s" % container.name)
 
 
         container.stop(timeout=self.stop_timeout(timeout))
         container.stop(timeout=self.stop_timeout(timeout))
         container.rename_to_tmp_name()
         container.rename_to_tmp_name()
@@ -741,21 +747,26 @@ class Service(object):
         container_options.update(override_options)
         container_options.update(override_options)
 
 
         if not container_options.get('name'):
         if not container_options.get('name'):
-            container_options['name'] = self.get_container_name(number, one_off)
+            container_options['name'] = self.get_container_name(self.name, number, one_off)
 
 
         container_options.setdefault('detach', True)
         container_options.setdefault('detach', True)
 
 
         # If a qualified hostname was given, split it into an
         # If a qualified hostname was given, split it into an
         # unqualified hostname and a domainname unless domainname
         # unqualified hostname and a domainname unless domainname
-        # was also given explicitly. This matches the behavior of
-        # the official Docker CLI in that scenario.
-        if ('hostname' in container_options and
+        # was also given explicitly. This matches behavior
+        # until Docker Engine 1.11.0 - Docker API 1.23.
+        if (version_lt(self.client.api_version, '1.23') and
+                'hostname' in container_options and
                 'domainname' not in container_options and
                 'domainname' not in container_options and
                 '.' in container_options['hostname']):
                 '.' in container_options['hostname']):
             parts = container_options['hostname'].partition('.')
             parts = container_options['hostname'].partition('.')
             container_options['hostname'] = parts[0]
             container_options['hostname'] = parts[0]
             container_options['domainname'] = parts[2]
             container_options['domainname'] = parts[2]
 
 
+        if (version_gte(self.client.api_version, '1.25') and
+                'stop_grace_period' in self.options):
+            container_options['stop_timeout'] = self.stop_timeout(None)
+
         if 'ports' in container_options or 'expose' in self.options:
         if 'ports' in container_options or 'expose' in self.options:
             container_options['ports'] = build_container_ports(
             container_options['ports'] = build_container_ports(
                 formatted_ports(container_options.get('ports', [])),
                 formatted_ports(container_options.get('ports', [])),
@@ -770,21 +781,38 @@ class Service(object):
             self.options.get('environment'),
             self.options.get('environment'),
             override_options.get('environment'))
             override_options.get('environment'))
 
 
+        container_options['labels'] = merge_labels(
+            self.options.get('labels'),
+            override_options.get('labels'))
+
+        container_volumes = []
+        container_mounts = []
+        if 'volumes' in container_options:
+            container_volumes = [
+                v for v in container_options.get('volumes') if isinstance(v, VolumeSpec)
+            ]
+            container_mounts = [v for v in container_options.get('volumes') if isinstance(v, MountSpec)]
+
         binds, affinity = merge_volume_bindings(
         binds, affinity = merge_volume_bindings(
-            container_options.get('volumes') or [],
-            self.options.get('tmpfs') or [],
-            previous_container)
+            container_volumes, self.options.get('tmpfs') or [], previous_container,
+            container_mounts
+        )
         override_options['binds'] = binds
         override_options['binds'] = binds
         container_options['environment'].update(affinity)
         container_options['environment'].update(affinity)
 
 
-        container_options['volumes'] = dict(
-            (v.internal, {}) for v in container_options.get('volumes') or {})
+        container_options['volumes'] = dict((v.internal, {}) for v in container_volumes or {})
+        override_options['mounts'] = [build_mount(v) for v in container_mounts] or None
 
 
         secret_volumes = self.get_secret_volumes()
         secret_volumes = self.get_secret_volumes()
         if secret_volumes:
         if secret_volumes:
-            override_options['binds'].extend(v.repr() for v in secret_volumes)
-            container_options['volumes'].update(
-                (v.internal, {}) for v in secret_volumes)
+            if version_lt(self.client.api_version, '1.30'):
+                override_options['binds'].extend(v.legacy_repr() for v in secret_volumes)
+                container_options['volumes'].update(
+                    (v.target, {}) for v in secret_volumes
+                )
+            else:
+                override_options['mounts'] = override_options.get('mounts') or []
+                override_options['mounts'].extend([build_mount(v) for v in secret_volumes])
 
 
         container_options['image'] = self.image_name
         container_options['image'] = self.image_name
 
 
@@ -857,6 +885,7 @@ class Service(object):
             sysctls=options.get('sysctls'),
             sysctls=options.get('sysctls'),
             pids_limit=options.get('pids_limit'),
             pids_limit=options.get('pids_limit'),
             tmpfs=options.get('tmpfs'),
             tmpfs=options.get('tmpfs'),
+            oom_kill_disable=options.get('oom_kill_disable'),
             oom_score_adj=options.get('oom_score_adj'),
             oom_score_adj=options.get('oom_score_adj'),
             mem_swappiness=options.get('mem_swappiness'),
             mem_swappiness=options.get('mem_swappiness'),
             group_add=options.get('group_add'),
             group_add=options.get('group_add'),
@@ -877,6 +906,7 @@ class Service(object):
             device_read_iops=blkio_config.get('device_read_iops'),
             device_read_iops=blkio_config.get('device_read_iops'),
             device_write_bps=blkio_config.get('device_write_bps'),
             device_write_bps=blkio_config.get('device_write_bps'),
             device_write_iops=blkio_config.get('device_write_iops'),
             device_write_iops=blkio_config.get('device_write_iops'),
+            mounts=options.get('mounts'),
         )
         )
 
 
     def get_secret_volumes(self):
     def get_secret_volumes(self):
@@ -887,11 +917,11 @@ class Service(object):
             elif not os.path.isabs(target):
             elif not os.path.isabs(target):
                 target = '{}/{}'.format(const.SECRETS_PATH, target)
                 target = '{}/{}'.format(const.SECRETS_PATH, target)
 
 
-            return VolumeSpec(secret['file'], target, 'ro')
+            return MountSpec('bind', secret['file'], target, read_only=True)
 
 
         return [build_spec(secret) for secret in self.secrets]
         return [build_spec(secret) for secret in self.secrets]
 
 
-    def build(self, no_cache=False, pull=False, force_rm=False, build_args_override=None):
+    def build(self, no_cache=False, pull=False, force_rm=False, memory=None, build_args_override=None):
         log.info('Building %s' % self.name)
         log.info('Building %s' % self.name)
 
 
         build_opts = self.options.get('build', {})
         build_opts = self.options.get('build', {})
@@ -921,6 +951,10 @@ class Service(object):
             network_mode=build_opts.get('network', None),
             network_mode=build_opts.get('network', None),
             target=build_opts.get('target', None),
             target=build_opts.get('target', None),
             shmsize=parse_bytes(build_opts.get('shm_size')) if build_opts.get('shm_size') else None,
             shmsize=parse_bytes(build_opts.get('shm_size')) if build_opts.get('shm_size') else None,
+            extra_hosts=build_opts.get('extra_hosts', None),
+            container_limits={
+                'memory': parse_bytes(memory) if memory else None
+            },
         )
         )
 
 
         try:
         try:
@@ -960,12 +994,12 @@ class Service(object):
     def custom_container_name(self):
     def custom_container_name(self):
         return self.options.get('container_name')
         return self.options.get('container_name')
 
 
-    def get_container_name(self, number, one_off=False):
+    def get_container_name(self, service_name, number, one_off=False):
         if self.custom_container_name and not one_off:
         if self.custom_container_name and not one_off:
             return self.custom_container_name
             return self.custom_container_name
 
 
         container_name = build_container_name(
         container_name = build_container_name(
-            self.project, self.name, number, one_off,
+            self.project, service_name, number, one_off,
         )
         )
         ext_links_origins = [l.split(':')[0] for l in self.options.get('external_links', [])]
         ext_links_origins = [l.split(':')[0] for l in self.options.get('external_links', [])]
         if container_name in ext_links_origins:
         if container_name in ext_links_origins:
@@ -1220,32 +1254,40 @@ def parse_repository_tag(repo_path):
 # Volumes
 # Volumes
 
 
 
 
-def merge_volume_bindings(volumes, tmpfs, previous_container):
-    """Return a list of volume bindings for a container. Container data volumes
-    are replaced by those from the previous container.
+def merge_volume_bindings(volumes, tmpfs, previous_container, mounts):
+    """
+        Return a list of volume bindings for a container. Container data volumes
+        are replaced by those from the previous container.
+        Anonymous mounts are updated in place.
     """
     """
     affinity = {}
     affinity = {}
 
 
     volume_bindings = dict(
     volume_bindings = dict(
         build_volume_binding(volume)
         build_volume_binding(volume)
         for volume in volumes
         for volume in volumes
-        if volume.external)
+        if volume.external
+    )
 
 
     if previous_container:
     if previous_container:
-        old_volumes = get_container_data_volumes(previous_container, volumes, tmpfs)
+        old_volumes, old_mounts = get_container_data_volumes(
+            previous_container, volumes, tmpfs, mounts
+        )
         warn_on_masked_volume(volumes, old_volumes, previous_container.service)
         warn_on_masked_volume(volumes, old_volumes, previous_container.service)
         volume_bindings.update(
         volume_bindings.update(
-            build_volume_binding(volume) for volume in old_volumes)
+            build_volume_binding(volume) for volume in old_volumes
+        )
 
 
-        if old_volumes:
+        if old_volumes or old_mounts:
             affinity = {'affinity:container': '=' + previous_container.id}
             affinity = {'affinity:container': '=' + previous_container.id}
 
 
     return list(volume_bindings.values()), affinity
     return list(volume_bindings.values()), affinity
 
 
 
 
-def get_container_data_volumes(container, volumes_option, tmpfs_option):
-    """Find the container data volumes that are in `volumes_option`, and return
-    a mapping of volume bindings for those volumes.
+def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_option):
+    """
+        Find the container data volumes that are in `volumes_option`, and return
+        a mapping of volume bindings for those volumes.
+        Anonymous volume mounts are updated in place instead.
     """
     """
     volumes = []
     volumes = []
     volumes_option = volumes_option or []
     volumes_option = volumes_option or []
@@ -1284,7 +1326,19 @@ def get_container_data_volumes(container, volumes_option, tmpfs_option):
         volume = volume._replace(external=mount['Name'])
         volume = volume._replace(external=mount['Name'])
         volumes.append(volume)
         volumes.append(volume)
 
 
-    return volumes
+    updated_mounts = False
+    for mount in mounts_option:
+        if mount.type != 'volume':
+            continue
+
+        ctnr_mount = container_mounts.get(mount.target)
+        if not ctnr_mount or not ctnr_mount.get('Name'):
+            continue
+
+        mount.source = ctnr_mount['Name']
+        updated_mounts = True
+
+    return volumes, updated_mounts
 
 
 
 
 def warn_on_masked_volume(volumes_option, container_volumes, service):
 def warn_on_masked_volume(volumes_option, container_volumes, service):
@@ -1331,6 +1385,18 @@ def build_volume_from(volume_from_spec):
         return "{}:{}".format(volume_from_spec.source.id, volume_from_spec.mode)
         return "{}:{}".format(volume_from_spec.source.id, volume_from_spec.mode)
 
 
 
 
+def build_mount(mount_spec):
+    kwargs = {}
+    if mount_spec.options:
+        for option, sdk_name in mount_spec.options_map[mount_spec.type].items():
+            if option in mount_spec.options:
+                kwargs[sdk_name] = mount_spec.options[option]
+
+    return Mount(
+        type=mount_spec.type, target=mount_spec.target, source=mount_spec.source,
+        read_only=mount_spec.read_only, consistency=mount_spec.consistency, **kwargs
+    )
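
Editor's note: a hedged sketch of what build_mount above produces, assuming this tree and the docker SDK (>= 2.6.1, per requirements.txt) are importable; docker.types.Mount is a dict subclass, so its keys can be inspected directly.

```
# Sketch: a bind-type MountSpec converted to the SDK's Mount type via build_mount.
from compose.config.types import MountSpec
from compose.service import build_mount

spec = MountSpec.parse({
    'type': 'bind',
    'source': '/host/path',
    'target': '/container/path',
    'read_only': True,
    'bind': {'propagation': 'rprivate'},
})

mount = build_mount(spec)
print(mount['Type'], mount['Target'])  # bind /container/path (read-only and bind options are carried too)
```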
+
 # Labels
 # Labels
 
 
 
 

+ 1 - 1
compose/utils.py

@@ -101,7 +101,7 @@ def json_stream(stream):
 
 
 
 
 def json_hash(obj):
 def json_hash(obj):
-    dump = json.dumps(obj, sort_keys=True, separators=(',', ':'))
+    dump = json.dumps(obj, sort_keys=True, separators=(',', ':'), default=lambda x: x.repr())
     h = hashlib.sha256()
     h = hashlib.sha256()
     h.update(dump.encode('utf8'))
     h.update(dump.encode('utf8'))
     return h.hexdigest()
     return h.hexdigest()
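
Editor's note: the new default= hook lets objects that expose a .repr() (MountSpec, ServicePort, ...) participate in config hashing; a minimal standalone illustration:

```
# Sketch: json.dumps falls back to .repr() for otherwise non-serializable config objects.
import hashlib
import json

class FakeSpec(object):
    def repr(self):
        return {'type': 'volume', 'target': '/data'}

dump = json.dumps({'volumes': [FakeSpec()]}, sort_keys=True,
                  separators=(',', ':'), default=lambda x: x.repr())
print(dump)  # {"volumes":[{"target":"/data","type":"volume"}]}
print(hashlib.sha256(dump.encode('utf8')).hexdigest()[:12])
```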

+ 7 - 2
compose/volume.py

@@ -7,6 +7,7 @@ from docker.errors import NotFound
 from docker.utils import version_lt
 from docker.utils import version_lt
 
 
 from .config import ConfigurationError
 from .config import ConfigurationError
+from .config.types import VolumeSpec
 from .const import LABEL_PROJECT
 from .const import LABEL_PROJECT
 from .const import LABEL_VOLUME
 from .const import LABEL_VOLUME
 
 
@@ -145,5 +146,9 @@ class ProjectVolumes(object):
         if not volume_spec.is_named_volume:
         if not volume_spec.is_named_volume:
             return volume_spec
             return volume_spec
 
 
-        volume = self.volumes[volume_spec.external]
-        return volume_spec._replace(external=volume.full_name)
+        if isinstance(volume_spec, VolumeSpec):
+            volume = self.volumes[volume_spec.external]
+            return volume_spec._replace(external=volume.full_name)
+        else:
+            volume_spec.source = self.volumes[volume_spec.source].full_name
+            return volume_spec

+ 3 - 3
contrib/completion/bash/docker-compose

@@ -120,7 +120,7 @@ _docker_compose_build() {
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "--build-arg --force-rm --help --no-cache --pull" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--build-arg --force-rm --help --memory --no-cache --pull" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			__docker_compose_services_from_build
 			__docker_compose_services_from_build
@@ -403,14 +403,14 @@ _docker_compose_run() {
 			__docker_compose_nospace
 			__docker_compose_nospace
 			return
 			return
 			;;
 			;;
-		--entrypoint|--name|--user|-u|--volume|-v|--workdir|-w)
+		--entrypoint|--label|-l|--name|--user|-u|--volume|-v|--workdir|-w)
 			return
 			return
 			;;
 			;;
 	esac
 	esac
 
 
 	case "$cur" in
 	case "$cur" in
 		-*)
 		-*)
-			COMPREPLY=( $( compgen -W "-d --entrypoint -e --help --name --no-deps --publish -p --rm --service-ports -T --user -u --volume -v --workdir -w" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "-d --entrypoint -e --help --label -l --name --no-deps --publish -p --rm --service-ports -T --user -u --volume -v --workdir -w" -- "$cur" ) )
 			;;
 			;;
 		*)
 		*)
 			__docker_compose_services_all
 			__docker_compose_services_all

+ 1 - 0
contrib/completion/zsh/_docker-compose

@@ -196,6 +196,7 @@ __docker-compose_subcommand() {
                 $opts_help \
                 $opts_help \
                 "*--build-arg=[Set build-time variables for one service.]:<varname>=<value>: " \
                 "*--build-arg=[Set build-time variables for one service.]:<varname>=<value>: " \
                 '--force-rm[Always remove intermediate containers.]' \
                 '--force-rm[Always remove intermediate containers.]' \
+                '--memory[Memory limit for the build container.]' \
                 '--no-cache[Do not use cache when building the image.]' \
                 '--no-cache[Do not use cache when building the image.]' \
                 '--pull[Always attempt to pull a newer version of the image.]' \
                 '--pull[Always attempt to pull a newer version of the image.]' \
                 '*:services:__docker-compose_services_from_build' && ret=0
                 '*:services:__docker-compose_services_from_build' && ret=0

+ 5 - 0
docker-compose.spec

@@ -67,6 +67,11 @@ exe = EXE(pyz,
                 'compose/config/config_schema_v3.4.json',
                 'compose/config/config_schema_v3.4.json',
                 'DATA'
                 'DATA'
             ),
             ),
+            (
+                'compose/config/config_schema_v3.5.json',
+                'compose/config/config_schema_v3.5.json',
+                'DATA'
+            ),
             (
             (
                 'compose/GITSHA',
                 'compose/GITSHA',
                 'compose/GITSHA',
                 'compose/GITSHA',

+ 1 - 1
project/RELEASE-PROCESS.md

@@ -89,7 +89,7 @@ When prompted build the non-linux binaries and test them.
         Alternatively, you can use the usual commands to install or upgrade Compose:
         Alternatively, you can use the usual commands to install or upgrade Compose:
 
 
         ```
         ```
-        curl -L https://github.com/docker/compose/releases/download/1.16.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
+        curl -L https://github.com/docker/compose/releases/download/1.16.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
         chmod +x /usr/local/bin/docker-compose
         chmod +x /usr/local/bin/docker-compose
         ```
         ```
 
 

+ 3 - 3
requirements.txt

@@ -2,20 +2,20 @@ backports.ssl-match-hostname==3.5.0.1; python_version < '3'
 cached-property==1.3.0
 cached-property==1.3.0
 certifi==2017.4.17
 certifi==2017.4.17
 chardet==3.0.4
 chardet==3.0.4
-colorama==0.3.9; sys_platform == 'win32'
-docker==2.5.1
+docker==2.6.1
 docker-pycreds==0.2.1
 docker-pycreds==0.2.1
 dockerpty==0.4.1
 dockerpty==0.4.1
 docopt==0.6.2
 docopt==0.6.2
 enum34==1.1.6; python_version < '3.4'
 enum34==1.1.6; python_version < '3.4'
 functools32==3.2.3.post2; python_version < '3.2'
 functools32==3.2.3.post2; python_version < '3.2'
+git+git://github.com/tartley/colorama.git@bd378c725b45eba0b8e5cc091c3ca76a954c92ff; sys_platform == 'win32'
 idna==2.5
 idna==2.5
 ipaddress==1.0.18
 ipaddress==1.0.18
 jsonschema==2.6.0
 jsonschema==2.6.0
 pypiwin32==219; sys_platform == 'win32'
 pypiwin32==219; sys_platform == 'win32'
 PySocks==1.6.7
 PySocks==1.6.7
 PyYAML==3.12
 PyYAML==3.12
-requests==2.11.1
+requests==2.18.4
 six==1.10.0
 six==1.10.0
 texttable==0.9.1
 texttable==0.9.1
 urllib3==1.21.1
 urllib3==1.21.1

+ 5 - 0
script/release/download-binaries

@@ -30,3 +30,8 @@ mkdir $DESTINATION
 wget -O $DESTINATION/docker-compose-Darwin-x86_64 $BASE_BINTRAY_URL/docker-compose-Darwin-x86_64
 wget -O $DESTINATION/docker-compose-Darwin-x86_64 $BASE_BINTRAY_URL/docker-compose-Darwin-x86_64
 wget -O $DESTINATION/docker-compose-Linux-x86_64 $BASE_BINTRAY_URL/docker-compose-Linux-x86_64
 wget -O $DESTINATION/docker-compose-Linux-x86_64 $BASE_BINTRAY_URL/docker-compose-Linux-x86_64
 wget -O $DESTINATION/docker-compose-Windows-x86_64.exe $APPVEYOR_URL
 wget -O $DESTINATION/docker-compose-Windows-x86_64.exe $APPVEYOR_URL
+
+echo -e "\n\nCopy the following lines into the integrity check table in the release notes:\n\n"
+cd $DESTINATION
+ls | xargs sha256sum | sed 's/  / | /g' | sed -r 's/([^ |]+)/`\1`/g'
+cd -

+ 1 - 1
script/run/run.sh

@@ -15,7 +15,7 @@
 
 
 set -e
 set -e
 
 
-VERSION="1.17.1"
+VERSION="1.18.0-rc1"
 IMAGE="docker/compose:$VERSION"
 IMAGE="docker/compose:$VERSION"
 
 
 
 

+ 3 - 3
setup.py

@@ -33,10 +33,10 @@ install_requires = [
     'cached-property >= 1.2.0, < 2',
     'cached-property >= 1.2.0, < 2',
     'docopt >= 0.6.1, < 0.7',
     'docopt >= 0.6.1, < 0.7',
     'PyYAML >= 3.10, < 4',
     'PyYAML >= 3.10, < 4',
-    'requests >= 2.6.1, != 2.11.0, < 2.12',
+    'requests >= 2.6.1, != 2.11.0, != 2.12.2, != 2.18.0, < 2.19',
     'texttable >= 0.9.0, < 0.10',
     'texttable >= 0.9.0, < 0.10',
     'websocket-client >= 0.32.0, < 1.0',
     'websocket-client >= 0.32.0, < 1.0',
-    'docker >= 2.5.1, < 3.0',
+    'docker >= 2.6.1, < 3.0',
     'dockerpty >= 0.4.1, < 0.5',
     'dockerpty >= 0.4.1, < 0.5',
     'six >= 1.3.0, < 2',
     'six >= 1.3.0, < 2',
     'jsonschema >= 2.5.1, < 3',
     'jsonschema >= 2.5.1, < 3',
@@ -55,7 +55,7 @@ extras_require = {
     ':python_version < "3.4"': ['enum34 >= 1.0.4, < 2'],
     ':python_version < "3.4"': ['enum34 >= 1.0.4, < 2'],
     ':python_version < "3.5"': ['backports.ssl_match_hostname >= 3.5'],
     ':python_version < "3.5"': ['backports.ssl_match_hostname >= 3.5'],
     ':python_version < "3.3"': ['ipaddress >= 1.0.16'],
     ':python_version < "3.3"': ['ipaddress >= 1.0.16'],
-    ':sys_platform == "win32"': ['colorama >= 0.3.7, < 0.4'],
+    ':sys_platform == "win32"': ['colorama >= 0.3.9, < 0.4'],
     'socks': ['PySocks >= 1.5.6, != 1.5.7, < 2'],
     'socks': ['PySocks >= 1.5.6, != 1.5.7, < 2'],
 }
 }
 
 

+ 100 - 20
tests/acceptance/cli_test.py

@@ -33,6 +33,7 @@ from tests.integration.testcases import no_cluster
 from tests.integration.testcases import pull_busybox
 from tests.integration.testcases import pull_busybox
 from tests.integration.testcases import SWARM_SKIP_RM_VOLUMES
 from tests.integration.testcases import SWARM_SKIP_RM_VOLUMES
 from tests.integration.testcases import v2_1_only
 from tests.integration.testcases import v2_1_only
+from tests.integration.testcases import v2_2_only
 from tests.integration.testcases import v2_only
 from tests.integration.testcases import v2_only
 from tests.integration.testcases import v3_only
 from tests.integration.testcases import v3_only
 
 
@@ -349,6 +350,22 @@ class CLITestCase(DockerClientTestCase):
             }
             }
         }
         }
 
 
+    def test_config_external_network_v3_5(self):
+        self.base_dir = 'tests/fixtures/networks'
+        result = self.dispatch(['-f', 'external-networks-v3-5.yml', 'config'])
+        json_result = yaml.load(result.stdout)
+        assert 'networks' in json_result
+        assert json_result['networks'] == {
+            'foo': {
+                'external': True,
+                'name': 'some_foo',
+            },
+            'bar': {
+                'external': True,
+                'name': 'some_bar',
+            },
+        }
+
     def test_config_v1(self):
     def test_config_v1(self):
         self.base_dir = 'tests/fixtures/v1-config'
         self.base_dir = 'tests/fixtures/v1-config'
         result = self.dispatch(['config'])
         result = self.dispatch(['config'])
@@ -427,13 +444,21 @@ class CLITestCase(DockerClientTestCase):
                         'timeout': '1s',
                         'timeout': '1s',
                         'retries': 5,
                         'retries': 5,
                     },
                     },
-                    'volumes': [
-                        '/host/path:/container/path:ro',
-                        'foobar:/container/volumepath:rw',
-                        '/anonymous',
-                        'foobar:/container/volumepath2:nocopy'
-                    ],
-
+                    'volumes': [{
+                        'read_only': True,
+                        'source': '/host/path',
+                        'target': '/container/path',
+                        'type': 'bind'
+                    }, {
+                        'source': 'foobar', 'target': '/container/volumepath', 'type': 'volume'
+                    }, {
+                        'target': '/anonymous', 'type': 'volume'
+                    }, {
+                        'source': 'foobar',
+                        'target': '/container/volumepath2',
+                        'type': 'volume',
+                        'volume': {'nocopy': True}
+                    }],
                     'stop_grace_period': '20s',
                     'stop_grace_period': '20s',
                 },
                 },
             },
             },
@@ -583,6 +608,12 @@ class CLITestCase(DockerClientTestCase):
         result = self.dispatch(['build', '--no-cache'], None)
         result = self.dispatch(['build', '--no-cache'], None)
         assert 'shm_size: 96' in result.stdout
         assert 'shm_size: 96' in result.stdout
 
 
+    def test_build_memory_build_option(self):
+        pull_busybox(self.client)
+        self.base_dir = 'tests/fixtures/build-memory'
+        result = self.dispatch(['build', '--no-cache', '--memory', '96m', 'service'], None)
+        assert 'memory: 100663296' in result.stdout  # 96 * 1024 * 1024
+
     def test_bundle_with_digests(self):
     def test_bundle_with_digests(self):
         self.base_dir = 'tests/fixtures/bundle-with-digests/'
         self.base_dir = 'tests/fixtures/bundle-with-digests/'
         tmpdir = pytest.ensuretemp('cli_test_bundle')
         tmpdir = pytest.ensuretemp('cli_test_bundle')
@@ -719,12 +750,13 @@ class CLITestCase(DockerClientTestCase):
     def test_run_one_off_with_volume_merge(self):
     def test_run_one_off_with_volume_merge(self):
         self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
         self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
         volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
         volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
-        create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
+        node = create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
 
 
         self.dispatch([
         self.dispatch([
             '-f', 'docker-compose.merge.yml',
             '-f', 'docker-compose.merge.yml',
             'run',
             'run',
             '-v', '{}:/data'.format(volume_path),
             '-v', '{}:/data'.format(volume_path),
+            '-e', 'constraint:node=={}'.format(node if node is not None else '*'),
             'simple',
             'simple',
             'test', '-f', '/data/example.txt'
             'test', '-f', '/data/example.txt'
         ], returncode=0)
         ], returncode=0)
@@ -774,6 +806,27 @@ class CLITestCase(DockerClientTestCase):
         assert 'Removing network v2full_default' in result.stderr
         assert 'Removing network v2full_default' in result.stderr
         assert 'Removing network v2full_front' in result.stderr
         assert 'Removing network v2full_front' in result.stderr
 
 
+    def test_down_timeout(self):
+        self.dispatch(['up', '-d'], None)
+        service = self.project.get_service('simple')
+        self.assertEqual(len(service.containers()), 1)
+        self.assertTrue(service.containers()[0].is_running)
+
+        self.dispatch(['down', '-t', '1'], None)
+
+        self.assertEqual(len(service.containers(stopped=True)), 0)
+
+    def test_down_signal(self):
+        self.base_dir = 'tests/fixtures/stop-signal-composefile'
+        self.dispatch(['up', '-d'], None)
+        service = self.project.get_service('simple')
+        self.assertEqual(len(service.containers()), 1)
+        self.assertTrue(service.containers()[0].is_running)
+
+        self.dispatch(['down', '-t', '1'], None)
+        self.assertEqual(len(service.containers(stopped=True)), 0)
+
     def test_up_detached(self):
     def test_up_detached(self):
         self.dispatch(['up', '-d'])
         self.dispatch(['up', '-d'])
         service = self.project.get_service('simple')
         service = self.project.get_service('simple')
@@ -1278,18 +1331,9 @@ class CLITestCase(DockerClientTestCase):
             ['up', '-d', '--force-recreate', '--no-recreate'],
             ['up', '-d', '--force-recreate', '--no-recreate'],
             returncode=1)
             returncode=1)
 
 
-    def test_up_with_timeout(self):
-        self.dispatch(['up', '-d', '-t', '1'])
-        service = self.project.get_service('simple')
-        another = self.project.get_service('another')
-        self.assertEqual(len(service.containers()), 1)
-        self.assertEqual(len(another.containers()), 1)
-
-        # Ensure containers don't have stdin and stdout connected in -d mode
-        config = service.containers()[0].inspect()['Config']
-        self.assertFalse(config['AttachStderr'])
-        self.assertFalse(config['AttachStdout'])
-        self.assertFalse(config['AttachStdin'])
+    def test_up_with_timeout_detached(self):
+        result = self.dispatch(['up', '-d', '-t', '1'], returncode=1)
+        assert "-d and --timeout cannot be combined." in result.stderr
 
 
     def test_up_handles_sigint(self):
     def test_up_handles_sigint(self):
         proc = start_process(self.base_dir, ['up', '-t', '2'])
         proc = start_process(self.base_dir, ['up', '-t', '2'])
@@ -1374,6 +1418,31 @@ class CLITestCase(DockerClientTestCase):
         self.assertEqual(stdout, "operator\n")
         self.assertEqual(stdout, "operator\n")
         self.assertEqual(stderr, "")
         self.assertEqual(stderr, "")
 
 
+    @v2_2_only()
+    def test_exec_service_with_environment_overridden(self):
+        name = 'service'
+        self.base_dir = 'tests/fixtures/environment-exec'
+        self.dispatch(['up', '-d'])
+        self.assertEqual(len(self.project.containers()), 1)
+
+        stdout, stderr = self.dispatch([
+            'exec',
+            '-T',
+            '-e', 'foo=notbar',
+            '--env', 'alpha=beta',
+            name,
+            'env',
+        ])
+
+        # env overridden
+        assert 'foo=notbar' in stdout
+        # keep environment from yaml
+        assert 'hello=world' in stdout
+        # added option from command line
+        assert 'alpha=beta' in stdout
+
+        self.assertEqual(stderr, '')
+
     def test_run_service_without_links(self):
     def test_run_service_without_links(self):
         self.base_dir = 'tests/fixtures/links-composefile'
         self.base_dir = 'tests/fixtures/links-composefile'
         self.dispatch(['run', 'console', '/bin/true'])
         self.dispatch(['run', 'console', '/bin/true'])
@@ -1803,6 +1872,17 @@ class CLITestCase(DockerClientTestCase):
         assert 'FOO=bar' in environment
         assert 'FOO=bar' in environment
         assert 'BAR=baz' not in environment
         assert 'BAR=baz' not in environment
 
 
+    def test_run_label_flag(self):
+        self.base_dir = 'tests/fixtures/run-labels'
+        name = 'service'
+        self.dispatch(['run', '-l', 'default', '--label', 'foo=baz', name, '/bin/true'])
+        service = self.project.get_service(name)
+        container, = service.containers(stopped=True, one_off=OneOffFilter.only)
+        labels = container.labels
+        assert labels['default'] == ''
+        assert labels['foo'] == 'baz'
+        assert labels['hello'] == 'world'
+
     def test_rm(self):
         service = self.project.get_service('simple')
         service.create_container()
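Note on the CLI changes above: `up -d` no longer accepts `--timeout`. The shutdown timeout only takes effect when Compose itself stops the containers (as in the attached `up -t 2` or the `down -t 1` call at the top of this section), so combining it with detached mode now fails fast with "-d and --timeout cannot be combined." instead of taking an option that would have no effect.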

+ 4 - 0
tests/fixtures/build-memory/Dockerfile

@@ -0,0 +1,4 @@
+FROM busybox
+
+# Report the memory limit (the cgroup memory.limit_in_bytes value)
+RUN echo "memory:" $(cat /sys/fs/cgroup/memory/memory.limit_in_bytes)

+ 6 - 0
tests/fixtures/build-memory/docker-compose.yml

@@ -0,0 +1,6 @@
+version: '3.5'
+
+services:
+  service:
+    build:
+      context: .
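The fixture above backs a build-memory test: the Dockerfile prints the cgroup memory limit while the image is being built. This diff does not show the Compose CLI surface for setting that limit, but the unit tests further down show `build()` now being called with a `container_limits` argument, which is the docker-py parameter that carries it. A minimal, illustrative sketch of exercising the fixture directly through docker-py (the tag and the 100 MB figure are made up for the example):

```python
# Illustrative only: pass a build-time memory limit via docker-py's
# container_limits, the argument the new build code path forwards.
import docker

client = docker.APIClient()
for chunk in client.build(
        path='tests/fixtures/build-memory',
        tag='composetest_build_memory',                   # hypothetical tag
        container_limits={'memory': 100 * 1024 * 1024},   # ~100 MB for build containers
        decode=True):
    print(chunk.get('stream', ''), end='')
```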

+ 10 - 0
tests/fixtures/environment-exec/docker-compose.yml

@@ -0,0 +1,10 @@
+version: "2.2"
+
+services:
+  service:
+    image: busybox:latest
+    command: top
+
+    environment:
+      foo: bar
+      hello: world
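The exec test above runs roughly `docker-compose exec -T -e foo=notbar --env alpha=beta service env` against this fixture: `foo` from the file is overridden to `notbar`, `hello=world` from the file is kept, and `alpha=beta` is added purely from the command line.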

+ 13 - 0
tests/fixtures/environment-interpolation-with-defaults/docker-compose.yml

@@ -0,0 +1,13 @@
+version: "2.1"
+
+services:
+  web:
+    # value is set in the environment, so the default must be ignored
+    image: ${IMAGE:-alpine}
+
+    # unset value with default value
+    ports:
+      - "${HOST_PORT:-80}:8000"
+
+    # unset value with empty default
+    hostname: "host-${UNSET_VALUE:-}"
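With IMAGE=busybox exported and the other variables left unset, the new interpolation unit test expects this file to resolve to image `busybox`, ports `"80:8000"` and hostname `host-`. A stand-alone sketch of the `${VAR:-default}` / `${VAR-default}` semantics involved (Compose implements them via compose.config.interpolation.TemplateWithDefaults; this helper is illustrative only and handles just the braced forms):

```python
import re

def interpolate(value, env):
    """Resolve ${VAR}, ${VAR:-default} and ${VAR-default} against a plain dict."""
    def repl(match):
        name, sep, default = match.group(1), match.group(2), match.group(3)
        if sep == ':-':                    # unset OR empty -> use default
            return env.get(name) or default
        if sep == '-':                     # only unset -> use default
            return env.get(name, default)
        return env.get(name, '')           # plain ${VAR}
    return re.sub(r'\$\{([^}:-]+)(:-|-)?([^}]*)\}', repl, value)

env = {'IMAGE': 'busybox'}
assert interpolate('${IMAGE:-alpine}', env) == 'busybox'
assert interpolate('${HOST_PORT:-80}:8000', env) == '80:8000'
assert interpolate('host-${UNSET_VALUE:-}', env) == 'host-'
```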

+ 17 - 0
tests/fixtures/networks/external-networks-v3-5.yml

@@ -0,0 +1,17 @@
+version: "3.5"
+
+services:
+  web:
+    image: busybox
+    command: top
+    networks:
+      - foo
+      - bar
+
+networks:
+  foo:
+    external: true
+    name: some_foo
+  bar:
+    external:
+      name: some_bar
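Both spellings above declare pre-existing networks: the 3.5-style top-level `name: some_foo` and the older `external: {name: some_bar}` form. Compose attaches services to them but does not create them, so they must already exist (for example via `docker network create some_foo`).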

+ 7 - 0
tests/fixtures/run-labels/docker-compose.yml

@@ -0,0 +1,7 @@
+service:
+  image: busybox:latest
+  command: top
+
+  labels:
+    foo: bar
+    hello: world
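The new `--label` test drives this fixture with `docker-compose run -l default --label foo=baz service /bin/true`; the resulting one-off container carries `default` (with an empty value) and `foo=baz` from the command line, the flag overriding the file's `foo: bar`, plus `hello: world` taken from the file.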

+ 8 - 5
tests/helpers.py

@@ -19,12 +19,8 @@ def build_config_details(contents, working_dir='working_dir', filename='filename
     )
 
 
-def create_host_file(client, filename):
+def create_custom_host_file(client, filename, content):
     dirname = os.path.dirname(filename)
     dirname = os.path.dirname(filename)
-
-    with open(filename, 'r') as fh:
-        content = fh.read()
-
     container = client.create_container(
     container = client.create_container(
         'busybox:latest',
         'busybox:latest',
         ['sh', '-c', 'echo -n "{}" > {}'.format(content, filename)],
         ['sh', '-c', 'echo -n "{}" > {}'.format(content, filename)],
@@ -48,3 +44,10 @@ def create_host_file(client, filename):
             return container_info['Node']['Name']
             return container_info['Node']['Name']
     finally:
     finally:
         client.remove_container(container, force=True)
         client.remove_container(container, force=True)
+
+
+def create_host_file(client, filename):
+    with open(filename, 'r') as fh:
+        content = fh.read()
+
+    return create_custom_host_file(client, filename, content)

+ 58 - 0
tests/integration/project_test.py

@@ -35,6 +35,7 @@ from tests.integration.testcases import is_cluster
 from tests.integration.testcases import no_cluster
 from tests.integration.testcases import v2_1_only
 from tests.integration.testcases import v2_2_only
+from tests.integration.testcases import v2_3_only
 from tests.integration.testcases import v2_only
 from tests.integration.testcases import v3_only
 
@@ -436,6 +437,26 @@ class ProjectTest(DockerClientTestCase):
         self.assertNotEqual(db_container.id, old_db_id)
         self.assertEqual(db_container.get('Volumes./etc'), db_volume_path)
 
+    @v2_3_only()
+    def test_recreate_preserves_mounts(self):
+        web = self.create_service('web')
+        db = self.create_service('db', volumes=[types.MountSpec(type='volume', target='/etc')])
+        project = Project('composetest', [web, db], self.client)
+        project.start()
+        assert len(project.containers()) == 0
+
+        project.up(['db'])
+        assert len(project.containers()) == 1
+        old_db_id = project.containers()[0].id
+        db_volume_path = project.containers()[0].get_mount('/etc')['Source']
+
+        project.up(strategy=ConvergenceStrategy.always)
+        assert len(project.containers()) == 2
+
+        db_container = [c for c in project.containers() if 'db' in c.name][0]
+        assert db_container.id != old_db_id
+        assert db_container.get_mount('/etc')['Source'] == db_volume_path
+
     def test_project_up_with_no_recreate_running(self):
         web = self.create_service('web')
         db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
@@ -932,6 +953,43 @@ class ProjectTest(DockerClientTestCase):
         assert 'LinkLocalIPs' in ipam_config
         assert ipam_config['LinkLocalIPs'] == ['169.254.8.8']
 
+    @v2_1_only()
+    def test_up_with_custom_name_resources(self):
+        config_data = build_config(
+            version=V2_2,
+            services=[{
+                'name': 'web',
+                'volumes': [VolumeSpec.parse('foo:/container-path')],
+                'networks': {'foo': {}},
+                'image': 'busybox:latest'
+            }],
+            networks={
+                'foo': {
+                    'name': 'zztop',
+                    'labels': {'com.docker.compose.test_value': 'sharpdressedman'}
+                }
+            },
+            volumes={
+                'foo': {
+                    'name': 'acdc',
+                    'labels': {'com.docker.compose.test_value': 'thefuror'}
+                }
+            }
+        )
+
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data
+        )
+
+        project.up(detached=True)
+        network = [n for n in self.client.networks() if n['Name'] == 'zztop'][0]
+        volume = [v for v in self.client.volumes()['Volumes'] if v['Name'] == 'acdc'][0]
+
+        assert network['Labels']['com.docker.compose.test_value'] == 'sharpdressedman'
+        assert volume['Labels']['com.docker.compose.test_value'] == 'thefuror'
+
     @v2_1_only()
     def test_up_with_isolation(self):
         self.require_api_version('1.24')
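In test_up_with_custom_name_resources above, the `name` key takes precedence over the usual project-scoped naming: the project is called composetest, yet the network is created as plain `zztop` and the volume as `acdc`, which is exactly what the lookups against `self.client.networks()` and `self.client.volumes()` rely on.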

+ 112 - 2
tests/integration/service_test.py

@@ -19,6 +19,7 @@ from .testcases import pull_busybox
 from .testcases import SWARM_SKIP_CONTAINERS_ALL
 from .testcases import SWARM_SKIP_CPU_SHARES
 from compose import __version__
+from compose.config.types import MountSpec
 from compose.config.types import VolumeFromSpec
 from compose.config.types import VolumeSpec
 from compose.const import IS_WINDOWS_PLATFORM
@@ -37,6 +38,7 @@ from compose.service import NetworkMode
 from compose.service import PidMode
 from compose.service import Service
 from compose.utils import parse_nanoseconds_int
+from tests.helpers import create_custom_host_file
 from tests.integration.testcases import is_cluster
 from tests.integration.testcases import no_cluster
 from tests.integration.testcases import v2_1_only
@@ -239,8 +241,7 @@ class ServiceTest(DockerClientTestCase):
         service.start_container(container)
         self.assertEqual(set(container.get('HostConfig.SecurityOpt')), set(security_opt))
 
-    # @pytest.mark.xfail(True, reason='Not supported on most drivers')
-    @pytest.mark.skipif(True, reason='https://github.com/moby/moby/issues/34270')
+    @pytest.mark.xfail(True, reason='Not supported on most drivers')
     def test_create_container_with_storage_opt(self):
         storage_opt = {'size': '1G'}
         service = self.create_service('db', storage_opt=storage_opt)
@@ -248,6 +249,12 @@ class ServiceTest(DockerClientTestCase):
         service.start_container(container)
         self.assertEqual(container.get('HostConfig.StorageOpt'), storage_opt)
 
+    def test_create_container_with_oom_kill_disable(self):
+        self.require_api_version('1.20')
+        service = self.create_service('db', oom_kill_disable=True)
+        container = service.create_container()
+        assert container.get('HostConfig.OomKillDisable') is True
+
     def test_create_container_with_mac_address(self):
         service = self.create_service('db', mac_address='02:42:ac:11:65:43')
         container = service.create_container()
@@ -271,6 +278,54 @@ class ServiceTest(DockerClientTestCase):
         self.assertTrue(path.basename(actual_host_path) == path.basename(host_path),
                         msg=("Last component differs: %s, %s" % (actual_host_path, host_path)))
 
+    @v2_3_only()
+    def test_create_container_with_host_mount(self):
+        host_path = '/tmp/host-path'
+        container_path = '/container-path'
+
+        create_custom_host_file(self.client, path.join(host_path, 'a.txt'), 'test')
+
+        service = self.create_service(
+            'db',
+            volumes=[
+                MountSpec(type='bind', source=host_path, target=container_path, read_only=True)
+            ]
+        )
+        container = service.create_container()
+        service.start_container(container)
+        mount = container.get_mount(container_path)
+        assert mount
+        assert path.basename(mount['Source']) == path.basename(host_path)
+        assert mount['RW'] is False
+
+    @v2_3_only()
+    def test_create_container_with_tmpfs_mount(self):
+        container_path = '/container-tmpfs'
+        service = self.create_service(
+            'db',
+            volumes=[MountSpec(type='tmpfs', target=container_path)]
+        )
+        container = service.create_container()
+        service.start_container(container)
+        mount = container.get_mount(container_path)
+        assert mount
+        assert mount['Type'] == 'tmpfs'
+
+    @v2_3_only()
+    def test_create_container_with_volume_mount(self):
+        container_path = '/container-volume'
+        volume_name = 'composetest_abcde'
+        self.client.create_volume(volume_name)
+        service = self.create_service(
+            'db',
+            volumes=[MountSpec(type='volume', source=volume_name, target=container_path)]
+        )
+        container = service.create_container()
+        service.start_container(container)
+        mount = container.get_mount(container_path)
+        assert mount
+        assert mount['Name'] == volume_name
+
     def test_create_container_with_healthcheck_config(self):
         one_second = parse_nanoseconds_int('1s')
         healthcheck = {
@@ -434,6 +489,38 @@ class ServiceTest(DockerClientTestCase):
 
 
             orig_container = new_container
 
+    @v2_3_only()
+    def test_execute_convergence_plan_recreate_twice_with_mount(self):
+        service = self.create_service(
+            'db',
+            volumes=[MountSpec(target='/etc', type='volume')],
+            entrypoint=['top'],
+            command=['-d', '1']
+        )
+
+        orig_container = service.create_container()
+        service.start_container(orig_container)
+
+        orig_container.inspect()  # reload volume data
+        volume_path = orig_container.get_mount('/etc')['Source']
+
+        # Do this twice to reproduce the bug
+        for _ in range(2):
+            new_container, = service.execute_convergence_plan(
+                ConvergencePlan('recreate', [orig_container])
+            )
+
+            assert new_container.get_mount('/etc')['Source'] == volume_path
+            if not is_cluster(self.client):
+                assert ('affinity:container==%s' % orig_container.id in
+                        new_container.get('Config.Env'))
+            else:
+                # In Swarm, the env marker is consumed and the container should be deployed
+                # on the same node.
+                assert orig_container.get('Node.Name') == new_container.get('Node.Name')
+
+            orig_container = new_container
+
     def test_execute_convergence_plan_when_containers_are_stopped(self):
         service = self.create_service(
             'db',
@@ -828,6 +915,29 @@ class ServiceTest(DockerClientTestCase):
         assert service.image()
         assert service.image()['Config']['Labels']['com.docker.compose.test.target'] == 'one'
 
+    @v2_3_only()
+    def test_build_with_extra_hosts(self):
+        self.require_api_version('1.27')
+        base_dir = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, base_dir)
+
+        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+            f.write('\n'.join([
+                'FROM busybox',
+                'RUN ping -c1 foobar',
+                'RUN ping -c1 baz',
+            ]))
+
+        service = self.create_service('build_extra_hosts', build={
+            'context': text_type(base_dir),
+            'extra_hosts': {
+                'foobar': '127.0.0.1',
+                'baz': '127.0.0.1'
+            }
+        })
+        service.build()
+        assert service.image()
+
     def test_start_container_stays_unprivileged(self):
         service = self.create_service('web')
         container = create_and_start_container(service).inspect()
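The mount-style volume tests above all build their volumes directly from compose.config.types.MountSpec. Collected in one place, using the same illustrative paths and volume name as the tests:

```python
from compose.config.types import MountSpec

# bind mount, read-only
bind = MountSpec(type='bind', source='/tmp/host-path', target='/container-path', read_only=True)
# tmpfs mount, no source
tmpfs = MountSpec(type='tmpfs', target='/container-tmpfs')
# named-volume mount
named = MountSpec(type='volume', source='composetest_abcde', target='/container-volume')

volumes = [bind, tmpfs, named]
```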

+ 3 - 3
tests/integration/testcases.py

@@ -20,7 +20,7 @@ from compose.const import COMPOSEFILE_V2_2 as V2_2
 from compose.const import COMPOSEFILE_V2_3 as V2_3
 from compose.const import COMPOSEFILE_V3_0 as V3_0
 from compose.const import COMPOSEFILE_V3_2 as V3_2
-from compose.const import COMPOSEFILE_V3_3 as V3_3
+from compose.const import COMPOSEFILE_V3_5 as V3_5
 from compose.const import LABEL_PROJECT
 from compose.progress_stream import stream_output
 from compose.service import Service
@@ -47,7 +47,7 @@ def get_links(container):
 
 
 def engine_max_version():
     if 'DOCKER_VERSION' not in os.environ:
-        return V3_3
+        return V3_5
     version = os.environ['DOCKER_VERSION'].partition('-')[0]
     if version_lt(version, '1.10'):
         return V1
@@ -57,7 +57,7 @@ def engine_max_version():
         return V2_1
     if version_lt(version, '17.06'):
         return V3_2
-    return V3_3
+    return V3_5
 
 
 def min_version_skip(version):
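Net effect of the testcases.py change: when DOCKER_VERSION is unset or reports an engine at 17.06 or newer, the integration suite now exercises compose file format 3.5 as its maximum, while engines older than 17.06 stay capped at 3.2 (and the older thresholds are unchanged).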

+ 7 - 0
tests/unit/cli_test.py

@@ -10,6 +10,7 @@ from io import StringIO
 import docker
 import py
 import pytest
+from docker.constants import DEFAULT_DOCKER_API_VERSION
 
 from .. import mock
 from .. import unittest
@@ -98,6 +99,7 @@ class CLITestCase(unittest.TestCase):
     @mock.patch('compose.cli.main.PseudoTerminal', autospec=True)
     def test_run_interactive_passes_logs_false(self, mock_pseudo_terminal, mock_run_operation):
         mock_client = mock.create_autospec(docker.APIClient)
+        mock_client.api_version = DEFAULT_DOCKER_API_VERSION
         project = Project.from_config(
             name='composetest',
             client=mock_client,
@@ -112,6 +114,7 @@ class CLITestCase(unittest.TestCase):
                 'SERVICE': 'service',
                 'COMMAND': None,
                 '-e': [],
+                '--label': [],
                 '--user': None,
                 '--no-deps': None,
                 '-d': False,
@@ -130,6 +133,7 @@ class CLITestCase(unittest.TestCase):
 
 
     def test_run_service_with_restart_always(self):
         mock_client = mock.create_autospec(docker.APIClient)
+        mock_client.api_version = DEFAULT_DOCKER_API_VERSION
 
         project = Project.from_config(
             name='composetest',
@@ -147,6 +151,7 @@ class CLITestCase(unittest.TestCase):
             'SERVICE': 'service',
             'COMMAND': None,
             '-e': [],
+            '--label': [],
             '--user': None,
             '--no-deps': None,
             '-d': True,
@@ -170,6 +175,7 @@ class CLITestCase(unittest.TestCase):
             'SERVICE': 'service',
             'COMMAND': None,
             '-e': [],
+            '--label': [],
             '--user': None,
             '--no-deps': None,
             '-d': True,
@@ -202,6 +208,7 @@ class CLITestCase(unittest.TestCase):
                 'SERVICE': 'service',
                 'COMMAND': None,
                 '-e': [],
+                '--label': [],
                 '--user': None,
                 '--no-deps': None,
                 '-d': True,
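Two mechanical changes repeat through this file: every docopt options dictionary gains a `'--label': []` entry for the new `run` flag, and every autospec'd APIClient is given a real api_version. The reason for the latter, as a minimal sketch (create_autospec leaves attributes as bare mocks, which would break any version comparison downstream; the tests use their own `from .. import mock` shim, unittest.mock is used here just to keep the sketch self-contained):

```python
import docker
from unittest import mock
from docker.constants import DEFAULT_DOCKER_API_VERSION

mock_client = mock.create_autospec(docker.APIClient)
# Without this, mock_client.api_version is a Mock, not a version string.
mock_client.api_version = DEFAULT_DOCKER_API_VERSION
```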

+ 311 - 43
tests/unit/config/config_test.py

@@ -34,7 +34,6 @@ from compose.const import COMPOSEFILE_V3_1 as V3_1
 from compose.const import COMPOSEFILE_V3_2 as V3_2
 from compose.const import COMPOSEFILE_V3_3 as V3_3
 from compose.const import IS_WINDOWS_PLATFORM
-from compose.utils import nanoseconds_from_time_seconds
 from tests import mock
 from tests import unittest
 
@@ -433,6 +432,40 @@ class ConfigTest(unittest.TestCase):
                 'label_key': 'label_val'
                 'label_key': 'label_val'
             }
             }
 
 
+    def test_load_config_custom_resource_names(self):
+        base_file = config.ConfigFile(
+            'base.yaml', {
+                'version': '3.5',
+                'volumes': {
+                    'abc': {
+                        'name': 'xyz'
+                    }
+                },
+                'networks': {
+                    'abc': {
+                        'name': 'xyz'
+                    }
+                },
+                'secrets': {
+                    'abc': {
+                        'name': 'xyz'
+                    }
+                },
+                'configs': {
+                    'abc': {
+                        'name': 'xyz'
+                    }
+                }
+            }
+        )
+        details = config.ConfigDetails('.', [base_file])
+        loaded_config = config.load(details)
+
+        assert loaded_config.networks['abc'] == {'name': 'xyz'}
+        assert loaded_config.volumes['abc'] == {'name': 'xyz'}
+        assert loaded_config.secrets['abc']['name'] == 'xyz'
+        assert loaded_config.configs['abc']['name'] == 'xyz'
+
     def test_load_config_volume_and_network_labels(self):
     def test_load_config_volume_and_network_labels(self):
         base_file = config.ConfigFile(
         base_file = config.ConfigFile(
             'base.yaml',
             'base.yaml',
@@ -1138,9 +1171,12 @@ class ConfigTest(unittest.TestCase):
         details = config.ConfigDetails('.', [base_file, override_file])
         details = config.ConfigDetails('.', [base_file, override_file])
         service_dicts = config.load(details).services
         service_dicts = config.load(details).services
         svc_volumes = map(lambda v: v.repr(), service_dicts[0]['volumes'])
         svc_volumes = map(lambda v: v.repr(), service_dicts[0]['volumes'])
-        assert sorted(svc_volumes) == sorted(
-            ['/anonymous', '/c:/b:rw', 'vol:/x:ro']
-        )
+        for vol in svc_volumes:
+            assert vol in [
+                '/anonymous',
+                '/c:/b:rw',
+                {'source': 'vol', 'target': '/x', 'type': 'volume', 'read_only': True}
+            ]
 
 
     @mock.patch.dict(os.environ)
     @mock.patch.dict(os.environ)
     def test_volume_mode_override(self):
     def test_volume_mode_override(self):
@@ -1224,6 +1260,50 @@ class ConfigTest(unittest.TestCase):
         assert volume.external == 'data0028'
         assert volume.external == 'data0028'
         assert volume.is_named_volume
         assert volume.is_named_volume
 
 
+    def test_volumes_long_syntax(self):
+        base_file = config.ConfigFile(
+            'base.yaml', {
+                'version': '2.3',
+                'services': {
+                    'web': {
+                        'image': 'busybox:latest',
+                        'volumes': [
+                            {
+                                'target': '/anonymous', 'type': 'volume'
+                            }, {
+                                'source': '/abc', 'target': '/xyz', 'type': 'bind'
+                            }, {
+                                'source': '\\\\.\\pipe\\abcd', 'target': '/named_pipe', 'type': 'npipe'
+                            }, {
+                                'type': 'tmpfs', 'target': '/tmpfs'
+                            }
+                        ]
+                    },
+                },
+            },
+        )
+        details = config.ConfigDetails('.', [base_file])
+        config_data = config.load(details)
+        volumes = config_data.services[0].get('volumes')
+        anon_volume = [v for v in volumes if v.target == '/anonymous'][0]
+        tmpfs_mount = [v for v in volumes if v.type == 'tmpfs'][0]
+        host_mount = [v for v in volumes if v.type == 'bind'][0]
+        npipe_mount = [v for v in volumes if v.type == 'npipe'][0]
+
+        assert anon_volume.type == 'volume'
+        assert not anon_volume.is_named_volume
+
+        assert tmpfs_mount.target == '/tmpfs'
+        assert not tmpfs_mount.is_named_volume
+
+        assert host_mount.source == os.path.normpath('/abc')
+        assert host_mount.target == '/xyz'
+        assert not host_mount.is_named_volume
+
+        assert npipe_mount.source == '\\\\.\\pipe\\abcd'
+        assert npipe_mount.target == '/named_pipe'
+        assert not npipe_mount.is_named_volume
+
     def test_config_valid_service_names(self):
     def test_config_valid_service_names(self):
         for valid_name in ['_', '-', '.__.', '_what-up.', 'what_.up----', 'whatup']:
         for valid_name in ['_', '-', '.__.', '_what-up.', 'what_.up----', 'whatup']:
             services = config.load(
             services = config.load(
@@ -2493,8 +2573,8 @@ class ConfigTest(unittest.TestCase):
                 'name': 'web',
                 'name': 'web',
                 'image': 'example/web',
                 'image': 'example/web',
                 'secrets': [
                 'secrets': [
-                    types.ServiceSecret('one', None, None, None, None),
-                    types.ServiceSecret('source', 'target', '100', '200', 0o777),
+                    types.ServiceSecret('one', None, None, None, None, None),
+                    types.ServiceSecret('source', 'target', '100', '200', 0o777, None),
                 ],
                 ],
             },
             },
         ]
         ]
@@ -2540,8 +2620,8 @@ class ConfigTest(unittest.TestCase):
                 'name': 'web',
                 'name': 'web',
                 'image': 'example/web',
                 'image': 'example/web',
                 'secrets': [
                 'secrets': [
-                    types.ServiceSecret('one', None, None, None, None),
-                    types.ServiceSecret('source', 'target', '100', '200', 0o777),
+                    types.ServiceSecret('one', None, None, None, None, None),
+                    types.ServiceSecret('source', 'target', '100', '200', 0o777, None),
                 ],
                 ],
             },
             },
         ]
         ]
@@ -2578,8 +2658,8 @@ class ConfigTest(unittest.TestCase):
                 'name': 'web',
                 'name': 'web',
                 'image': 'example/web',
                 'image': 'example/web',
                 'configs': [
                 'configs': [
-                    types.ServiceConfig('one', None, None, None, None),
-                    types.ServiceConfig('source', 'target', '100', '200', 0o777),
+                    types.ServiceConfig('one', None, None, None, None, None),
+                    types.ServiceConfig('source', 'target', '100', '200', 0o777, None),
                 ],
                 ],
             },
             },
         ]
         ]
@@ -2625,13 +2705,40 @@ class ConfigTest(unittest.TestCase):
                 'name': 'web',
                 'name': 'web',
                 'image': 'example/web',
                 'image': 'example/web',
                 'configs': [
                 'configs': [
-                    types.ServiceConfig('one', None, None, None, None),
-                    types.ServiceConfig('source', 'target', '100', '200', 0o777),
+                    types.ServiceConfig('one', None, None, None, None, None),
+                    types.ServiceConfig('source', 'target', '100', '200', 0o777, None),
                 ],
                 ],
             },
             },
         ]
         ]
         assert service_sort(service_dicts) == service_sort(expected)
         assert service_sort(service_dicts) == service_sort(expected)
 
 
+    def test_service_volume_invalid_config(self):
+        config_details = build_config_details(
+            {
+                'version': '3.2',
+                'services': {
+                    'web': {
+                        'build': {
+                            'context': '.',
+                            'args': None,
+                        },
+                        'volumes': [
+                            {
+                                "type": "volume",
+                                "source": "/data",
+                                "garbage": {
+                                    "and": "error"
+                                }
+                            }
+                        ]
+                    },
+                },
+            }
+        )
+        with pytest.raises(ConfigurationError) as exc:
+            config.load(config_details)
+        assert "services.web.volumes contains unsupported option: 'garbage'" in exc.exconly()
+
 
 
 class NetworkModeTest(unittest.TestCase):
 class NetworkModeTest(unittest.TestCase):
 
 
@@ -2847,6 +2954,94 @@ class PortsTest(unittest.TestCase):
         )
         )
 
 
 
 
+class SubnetTest(unittest.TestCase):
+    INVALID_SUBNET_TYPES = [
+        None,
+        False,
+        10,
+    ]
+
+    INVALID_SUBNET_MAPPINGS = [
+        "",
+        "192.168.0.1/sdfsdfs",
+        "192.168.0.1/",
+        "192.168.0.1/33",
+        "192.168.0.1/01",
+        "192.168.0.1",
+        "fe80:0000:0000:0000:0204:61ff:fe9d:f156/sdfsdfs",
+        "fe80:0000:0000:0000:0204:61ff:fe9d:f156/",
+        "fe80:0000:0000:0000:0204:61ff:fe9d:f156/129",
+        "fe80:0000:0000:0000:0204:61ff:fe9d:f156/01",
+        "fe80:0000:0000:0000:0204:61ff:fe9d:f156",
+        "ge80:0000:0000:0000:0204:61ff:fe9d:f156/128",
+        "192.168.0.1/31/31",
+    ]
+
+    VALID_SUBNET_MAPPINGS = [
+        "192.168.0.1/0",
+        "192.168.0.1/32",
+        "fe80:0000:0000:0000:0204:61ff:fe9d:f156/0",
+        "fe80:0000:0000:0000:0204:61ff:fe9d:f156/128",
+        "1:2:3:4:5:6:7:8/0",
+        "1::/0",
+        "1:2:3:4:5:6:7::/0",
+        "1::8/0",
+        "1:2:3:4:5:6::8/0",
+        "::/0",
+        "::8/0",
+        "::2:3:4:5:6:7:8/0",
+        "fe80::7:8%eth0/0",
+        "fe80::7:8%1/0",
+        "::255.255.255.255/0",
+        "::ffff:255.255.255.255/0",
+        "::ffff:0:255.255.255.255/0",
+        "2001:db8:3:4::192.0.2.33/0",
+        "64:ff9b::192.0.2.33/0",
+    ]
+
+    def test_config_invalid_subnet_type_validation(self):
+        for invalid_subnet in self.INVALID_SUBNET_TYPES:
+            with pytest.raises(ConfigurationError) as exc:
+                self.check_config(invalid_subnet)
+
+            assert "contains an invalid type" in exc.value.msg
+
+    def test_config_invalid_subnet_format_validation(self):
+        for invalid_subnet in self.INVALID_SUBNET_MAPPINGS:
+            with pytest.raises(ConfigurationError) as exc:
+                self.check_config(invalid_subnet)
+
+            assert "should use the CIDR format" in exc.value.msg
+
+    def test_config_valid_subnet_format_validation(self):
+        for valid_subnet in self.VALID_SUBNET_MAPPINGS:
+            self.check_config(valid_subnet)
+
+    def check_config(self, subnet):
+        config.load(
+            build_config_details({
+                'version': '3.5',
+                'services': {
+                    'web': {
+                        'image': 'busybox'
+                    }
+                },
+                'networks': {
+                    'default': {
+                        'ipam': {
+                            'config': [
+                                {
+                                    'subnet': subnet
+                                }
+                            ],
+                            'driver': 'default'
+                        }
+                    }
+                }
+            })
+        )
+
+
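The valid/invalid lists above pin down the rule: a subnet must be written in CIDR form, an IPv4 or IPv6 address followed by `/` and a prefix length within range and without leading zeros. Compose enforces this as part of config validation; the helper below is an approximate, illustrative re-statement of the same rule (it deliberately ignores IPv6 zone IDs such as `fe80::7:8%eth0`, which the real check accepts):

```python
import ipaddress

def is_valid_subnet(value):
    # CIDR form only: "<address>/<prefix>", prefix in range and without leading zeros.
    if not isinstance(value, str) or value.count('/') != 1:
        return False
    address, _, prefix = value.partition('/')
    if not prefix.isdigit() or (len(prefix) > 1 and prefix.startswith('0')):
        return False
    try:
        parsed = ipaddress.ip_address(address)
    except ValueError:
        return False
    max_prefix = 32 if parsed.version == 4 else 128
    return int(prefix) <= max_prefix

assert is_valid_subnet('192.168.0.1/32')
assert not is_valid_subnet('192.168.0.1')      # missing prefix
assert not is_valid_subnet('192.168.0.1/33')   # prefix out of range
assert not is_valid_subnet('192.168.0.1/01')   # leading zero
```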
 class InterpolationTest(unittest.TestCase):
 class InterpolationTest(unittest.TestCase):
 
 
     @mock.patch.dict(os.environ)
     @mock.patch.dict(os.environ)
@@ -2894,6 +3089,28 @@ class InterpolationTest(unittest.TestCase):
             }
             }
         ])
         ])
 
 
+    @mock.patch.dict(os.environ)
+    def test_config_file_with_environment_variable_with_defaults(self):
+        project_dir = 'tests/fixtures/environment-interpolation-with-defaults'
+        os.environ.update(
+            IMAGE="busybox",
+        )
+
+        service_dicts = config.load(
+            config.find(
+                project_dir, None, Environment.from_env_file(project_dir)
+            )
+        ).services
+
+        self.assertEqual(service_dicts, [
+            {
+                'name': 'web',
+                'image': 'busybox',
+                'ports': types.ServicePort.parse('80:8000'),
+                'hostname': 'host-',
+            }
+        ])
+
     @mock.patch.dict(os.environ)
     @mock.patch.dict(os.environ)
     def test_unset_variable_produces_warning(self):
     def test_unset_variable_produces_warning(self):
         os.environ.pop('FOO', None)
         os.environ.pop('FOO', None)
@@ -2948,7 +3165,7 @@ class InterpolationTest(unittest.TestCase):
         assert config_dict.secrets == {
         assert config_dict.secrets == {
             'secretdata': {
             'secretdata': {
                 'external': {'name': 'baz.bar'},
                 'external': {'name': 'baz.bar'},
-                'external_name': 'baz.bar'
+                'name': 'baz.bar'
             }
             }
         }
         }
 
 
@@ -2966,7 +3183,7 @@ class InterpolationTest(unittest.TestCase):
         assert config_dict.configs == {
         assert config_dict.configs == {
             'configdata': {
             'configdata': {
                 'external': {'name': 'baz.bar'},
                 'external': {'name': 'baz.bar'},
-                'external_name': 'baz.bar'
+                'name': 'baz.bar'
             }
             }
         }
         }
 
 
@@ -4188,52 +4405,103 @@ class BuildPathTest(unittest.TestCase):
 
 
 class HealthcheckTest(unittest.TestCase):
 class HealthcheckTest(unittest.TestCase):
     def test_healthcheck(self):
     def test_healthcheck(self):
-        service_dict = make_service_dict(
-            'test',
-            {'healthcheck': {
-                'test': ['CMD', 'true'],
-                'interval': '1s',
-                'timeout': '1m',
-                'retries': 3,
-                'start_period': '10s'
-            }},
-            '.',
+        config_dict = config.load(
+            build_config_details({
+                'version': '2.3',
+                'services': {
+                    'test': {
+                        'image': 'busybox',
+                        'healthcheck': {
+                            'test': ['CMD', 'true'],
+                            'interval': '1s',
+                            'timeout': '1m',
+                            'retries': 3,
+                            'start_period': '10s',
+                        }
+                    }
+                }
+
+            })
         )
         )
 
 
-        assert service_dict['healthcheck'] == {
+        serialized_config = yaml.load(serialize_config(config_dict))
+        serialized_service = serialized_config['services']['test']
+
+        assert serialized_service['healthcheck'] == {
             'test': ['CMD', 'true'],
             'test': ['CMD', 'true'],
-            'interval': nanoseconds_from_time_seconds(1),
-            'timeout': nanoseconds_from_time_seconds(60),
+            'interval': '1s',
+            'timeout': '1m',
             'retries': 3,
             'retries': 3,
-            'start_period': nanoseconds_from_time_seconds(10)
+            'start_period': '10s'
         }
         }
 
 
     def test_disable(self):
     def test_disable(self):
-        service_dict = make_service_dict(
-            'test',
-            {'healthcheck': {
-                'disable': True,
-            }},
-            '.',
+        config_dict = config.load(
+            build_config_details({
+                'version': '2.3',
+                'services': {
+                    'test': {
+                        'image': 'busybox',
+                        'healthcheck': {
+                            'disable': True,
+                        }
+                    }
+                }
+
+            })
         )
         )
 
 
-        assert service_dict['healthcheck'] == {
+        serialized_config = yaml.load(serialize_config(config_dict))
+        serialized_service = serialized_config['services']['test']
+
+        assert serialized_service['healthcheck'] == {
             'test': ['NONE'],
             'test': ['NONE'],
         }
         }
 
 
     def test_disable_with_other_config_is_invalid(self):
     def test_disable_with_other_config_is_invalid(self):
         with pytest.raises(ConfigurationError) as excinfo:
         with pytest.raises(ConfigurationError) as excinfo:
-            make_service_dict(
-                'invalid-healthcheck',
-                {'healthcheck': {
-                    'disable': True,
-                    'interval': '1s',
-                }},
-                '.',
+            config.load(
+                build_config_details({
+                    'version': '2.3',
+                    'services': {
+                        'invalid-healthcheck': {
+                            'image': 'busybox',
+                            'healthcheck': {
+                                'disable': True,
+                                'interval': '1s',
+                            }
+                        }
+                    }
+
+                })
+            )
+
+        assert 'invalid-healthcheck' in excinfo.exconly()
+        assert '"disable: true" cannot be combined with other options' in excinfo.exconly()
+
+    def test_healthcheck_with_invalid_test(self):
+        with pytest.raises(ConfigurationError) as excinfo:
+            config.load(
+                build_config_details({
+                    'version': '2.3',
+                    'services': {
+                        'invalid-healthcheck': {
+                            'image': 'busybox',
+                            'healthcheck': {
+                                'test': ['true'],
+                                'interval': '1s',
+                                'timeout': '1m',
+                                'retries': 3,
+                                'start_period': '10s',
+                            }
+                        }
+                    }
+
+                })
             )
             )
 
 
         assert 'invalid-healthcheck' in excinfo.exconly()
         assert 'invalid-healthcheck' in excinfo.exconly()
-        assert 'disable' in excinfo.exconly()
+        assert 'the first item must be either NONE, CMD or CMD-SHELL' in excinfo.exconly()
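The reworked healthcheck tests also pin down two behaviours: durations such as '1s' and '1m' now survive a load/serialize round-trip as strings rather than being flattened to nanosecond integers, and a list-form `test` must start with NONE, CMD or CMD-SHELL, otherwise config loading fails with the error asserted above.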
 
 
 
 
 class GetDefaultConfigFilesTestCase(unittest.TestCase):

+ 14 - 0
tests/unit/config/environment_test.py

@@ -3,6 +3,11 @@ from __future__ import absolute_import
 from __future__ import print_function
 from __future__ import print_function
 from __future__ import unicode_literals
 from __future__ import unicode_literals
 
 
+import codecs
+
+import pytest
+
+from compose.config.environment import env_vars_from_file
 from compose.config.environment import Environment
 from compose.config.environment import Environment
 from tests import unittest
 from tests import unittest
 
 
@@ -38,3 +43,12 @@ class EnvironmentTest(unittest.TestCase):
         assert env.get_boolean('BAZ') is False
         assert env.get_boolean('BAZ') is False
         assert env.get_boolean('FOOBAR') is True
         assert env.get_boolean('FOOBAR') is True
         assert env.get_boolean('UNDEFINED') is False
         assert env.get_boolean('UNDEFINED') is False
+
+    def test_env_vars_from_file_bom(self):
+        tmpdir = pytest.ensuretemp('env_file')
+        self.addCleanup(tmpdir.remove)
+        with codecs.open('{}/bom.env'.format(str(tmpdir)), 'w', encoding='utf-8') as f:
+            f.write('\ufeffPARK_BOM=박봄\n')
+        assert env_vars_from_file(str(tmpdir.join('bom.env'))) == {
+            'PARK_BOM': '박봄'
+        }
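test_env_vars_from_file_bom writes the env file with an explicit UTF-8 BOM ('\ufeff'); the assertion shows env_vars_from_file now strips it, so the first key parses as PARK_BOM rather than '\ufeffPARK_BOM', the usual symptom with .env files saved by BOM-adding editors.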

+ 195 - 3
tests/unit/config/interpolation_test.py

@@ -9,12 +9,22 @@ from compose.config.interpolation import Interpolator
 from compose.config.interpolation import InvalidInterpolation
 from compose.config.interpolation import InvalidInterpolation
 from compose.config.interpolation import TemplateWithDefaults
 from compose.config.interpolation import TemplateWithDefaults
 from compose.const import COMPOSEFILE_V2_0 as V2_0
 from compose.const import COMPOSEFILE_V2_0 as V2_0
-from compose.const import COMPOSEFILE_V3_1 as V3_1
+from compose.const import COMPOSEFILE_V2_3 as V2_3
+from compose.const import COMPOSEFILE_V3_4 as V3_4
 
 
 
 
 @pytest.fixture
 @pytest.fixture
 def mock_env():
 def mock_env():
-    return Environment({'USER': 'jenny', 'FOO': 'bar'})
+    return Environment({
+        'USER': 'jenny',
+        'FOO': 'bar',
+        'TRUE': 'True',
+        'FALSE': 'OFF',
+        'POSINT': '50',
+        'NEGINT': '-200',
+        'FLOAT': '0.145',
+        'MODE': '0600',
+    })
 
 
 
 
 @pytest.fixture
 @pytest.fixture
@@ -102,7 +112,189 @@ def test_interpolate_environment_variables_in_secrets(mock_env):
         },
         },
         'other': {},
         'other': {},
     }
     }
-    value = interpolate_environment_variables(V3_1, secrets, 'volume', mock_env)
+    value = interpolate_environment_variables(V3_4, secrets, 'secret', mock_env)
+    assert value == expected
+
+
+def test_interpolate_environment_services_convert_types_v2(mock_env):
+    entry = {
+        'service1': {
+            'blkio_config': {
+                'weight': '${POSINT}',
+                'weight_device': [{'file': '/dev/sda1', 'weight': '${POSINT}'}]
+            },
+            'cpus': '${FLOAT}',
+            'cpu_count': '$POSINT',
+            'healthcheck': {
+                'retries': '${POSINT:-3}',
+                'disable': '${FALSE}',
+                'command': 'true'
+            },
+            'mem_swappiness': '${DEFAULT:-127}',
+            'oom_score_adj': '${NEGINT}',
+            'scale': '${POSINT}',
+            'ulimits': {
+                'nproc': '${POSINT}',
+                'nofile': {
+                    'soft': '${POSINT}',
+                    'hard': '${DEFAULT:-40000}'
+                },
+            },
+            'privileged': '${TRUE}',
+            'read_only': '${DEFAULT:-no}',
+            'tty': '${DEFAULT:-N}',
+            'stdin_open': '${DEFAULT-on}',
+        }
+    }
+
+    expected = {
+        'service1': {
+            'blkio_config': {
+                'weight': 50,
+                'weight_device': [{'file': '/dev/sda1', 'weight': 50}]
+            },
+            'cpus': 0.145,
+            'cpu_count': 50,
+            'healthcheck': {
+                'retries': 50,
+                'disable': False,
+                'command': 'true'
+            },
+            'mem_swappiness': 127,
+            'oom_score_adj': -200,
+            'scale': 50,
+            'ulimits': {
+                'nproc': 50,
+                'nofile': {
+                    'soft': 50,
+                    'hard': 40000
+                },
+            },
+            'privileged': True,
+            'read_only': False,
+            'tty': False,
+            'stdin_open': True,
+        }
+    }
+
+    value = interpolate_environment_variables(V2_3, entry, 'service', mock_env)
+    assert value == expected
+
+
+def test_interpolate_environment_services_convert_types_v3(mock_env):
+    entry = {
+        'service1': {
+            'healthcheck': {
+                'retries': '${POSINT:-3}',
+                'disable': '${FALSE}',
+                'command': 'true'
+            },
+            'ulimits': {
+                'nproc': '${POSINT}',
+                'nofile': {
+                    'soft': '${POSINT}',
+                    'hard': '${DEFAULT:-40000}'
+                },
+            },
+            'privileged': '${TRUE}',
+            'read_only': '${DEFAULT:-no}',
+            'tty': '${DEFAULT:-N}',
+            'stdin_open': '${DEFAULT-on}',
+            'deploy': {
+                'update_config': {
+                    'parallelism': '${DEFAULT:-2}',
+                    'max_failure_ratio': '${FLOAT}',
+                },
+                'restart_policy': {
+                    'max_attempts': '$POSINT',
+                },
+                'replicas': '${DEFAULT-3}'
+            },
+            'ports': [{'target': '${POSINT}', 'published': '${DEFAULT:-5000}'}],
+            'configs': [{'mode': '${MODE}', 'source': 'config1'}],
+            'secrets': [{'mode': '${MODE}', 'source': 'secret1'}],
+        }
+    }
+
+    expected = {
+        'service1': {
+            'healthcheck': {
+                'retries': 50,
+                'disable': False,
+                'command': 'true'
+            },
+            'ulimits': {
+                'nproc': 50,
+                'nofile': {
+                    'soft': 50,
+                    'hard': 40000
+                },
+            },
+            'privileged': True,
+            'read_only': False,
+            'tty': False,
+            'stdin_open': True,
+            'deploy': {
+                'update_config': {
+                    'parallelism': 2,
+                    'max_failure_ratio': 0.145,
+                },
+                'restart_policy': {
+                    'max_attempts': 50,
+                },
+                'replicas': 3
+            },
+            'ports': [{'target': 50, 'published': 5000}],
+            'configs': [{'mode': 0o600, 'source': 'config1'}],
+            'secrets': [{'mode': 0o600, 'source': 'secret1'}],
+        }
+    }
+
+    value = interpolate_environment_variables(V3_4, entry, 'service', mock_env)
+    assert value == expected
+
+
+def test_interpolate_environment_network_convert_types(mock_env):
+    entry = {
+        'network1': {
+            'external': '${FALSE}',
+            'attachable': '${TRUE}',
+            'internal': '${DEFAULT:-false}'
+        }
+    }
+
+    expected = {
+        'network1': {
+            'external': False,
+            'attachable': True,
+            'internal': False,
+        }
+    }
+
+    value = interpolate_environment_variables(V3_4, entry, 'network', mock_env)
+    assert value == expected
+
+
+def test_interpolate_environment_external_resource_convert_types(mock_env):
+    entry = {
+        'resource1': {
+            'external': '${TRUE}',
+        }
+    }
+
+    expected = {
+        'resource1': {
+            'external': True,
+        }
+    }
+
+    value = interpolate_environment_variables(V3_4, entry, 'network', mock_env)
+    assert value == expected
+    value = interpolate_environment_variables(V3_4, entry, 'volume', mock_env)
+    assert value == expected
+    value = interpolate_environment_variables(V3_4, entry, 'secret', mock_env)
+    assert value == expected
+    value = interpolate_environment_variables(V3_4, entry, 'config', mock_env)
     assert value == expected
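The typed-interpolation tests above rely on boolean coercion of environment strings ('True', 'OFF', 'no', 'N', 'on', ...). A minimal sketch of that coercion rule (Compose performs it during interpolation; the helper name here is illustrative):

```python
def to_boolean(value):
    # Accepts the YAML-style spellings seen in the expectations above.
    if not isinstance(value, str):
        return value
    lowered = value.lower()
    if lowered in ('y', 'yes', 'true', 'on'):
        return True
    if lowered in ('n', 'no', 'false', 'off'):
        return False
    raise ValueError('"{}" is not a valid boolean value'.format(value))

assert to_boolean('True') is True    # ${TRUE}
assert to_boolean('OFF') is False    # ${FALSE}
assert to_boolean('on') is True      # ${DEFAULT-on}
```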
 
 
 
 

+ 26 - 0
tests/unit/config/types_test.py

@@ -100,11 +100,37 @@ class TestServicePort(object):
             'published': 25001
             'published': 25001
         } in reprs
         } in reprs
 
 
+    def test_parse_port_publish_range(self):
+        ports = ServicePort.parse('4440-4450:4000')
+        assert len(ports) == 1
+        reprs = [p.repr() for p in ports]
+        assert {
+            'target': 4000,
+            'published': '4440-4450'
+        } in reprs
+
     def test_parse_invalid_port(self):
     def test_parse_invalid_port(self):
         port_def = '4000p'
         port_def = '4000p'
         with pytest.raises(ConfigurationError):
         with pytest.raises(ConfigurationError):
             ServicePort.parse(port_def)
             ServicePort.parse(port_def)
 
 
+    def test_parse_invalid_publish_range(self):
+        port_def = '-4000:4000'
+        with pytest.raises(ConfigurationError):
+            ServicePort.parse(port_def)
+
+        port_def = 'asdf:4000'
+        with pytest.raises(ConfigurationError):
+            ServicePort.parse(port_def)
+
+        port_def = '1234-12f:4000'
+        with pytest.raises(ConfigurationError):
+            ServicePort.parse(port_def)
+
+        port_def = '1234-1235-1239:4000'
+        with pytest.raises(ConfigurationError):
+            ServicePort.parse(port_def)
+
 
 
 class TestVolumeSpec(object):
 class TestVolumeSpec(object):
 
 

+ 41 - 16
tests/unit/service_test.py

@@ -3,6 +3,7 @@ from __future__ import unicode_literals
 
 
 import docker
 import docker
 import pytest
+from docker.constants import DEFAULT_DOCKER_API_VERSION
 from docker.errors import APIError
 
 from .. import mock
 
 
     def setUp(self):
     def setUp(self):
         self.mock_client = mock.create_autospec(docker.APIClient)
         self.mock_client = mock.create_autospec(docker.APIClient)
+        self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
 
 
     def test_containers(self):
     def test_containers(self):
         service = Service('db', self.mock_client, 'myproject', image='foo')
         service = Service('db', self.mock_client, 'myproject', image='foo')
@@ -145,12 +147,6 @@ class ServiceTest(unittest.TestCase):
         self.assertEqual(service._get_volumes_from(), [container_id + ':rw'])
         self.assertEqual(service._get_volumes_from(), [container_id + ':rw'])
         from_service.create_container.assert_called_once_with()
         from_service.create_container.assert_called_once_with()
 
 
-    def test_split_domainname_none(self):
-        service = Service('foo', image='foo', hostname='name', client=self.mock_client)
-        opts = service._get_container_create_options({'image': 'foo'}, 1)
-        self.assertEqual(opts['hostname'], 'name', 'hostname')
-        self.assertFalse('domainname' in opts, 'domainname')
-
     def test_memory_swap_limit(self):
     def test_memory_swap_limit(self):
         self.mock_client.create_host_config.return_value = {}
         self.mock_client.create_host_config.return_value = {}
 
 
@@ -179,7 +175,7 @@ class ServiceTest(unittest.TestCase):
             external_links=['default_foo_1']
             external_links=['default_foo_1']
         )
         )
         with self.assertRaises(DependencyError):
         with self.assertRaises(DependencyError):
-            service.get_container_name(1)
+            service.get_container_name('foo', 1)
 
 
     def test_mem_reservation(self):
     def test_mem_reservation(self):
         self.mock_client.create_host_config.return_value = {}
         self.mock_client.create_host_config.return_value = {}
@@ -232,7 +228,29 @@ class ServiceTest(unittest.TestCase):
             {'Type': 'syslog', 'Config': {'syslog-address': 'tcp://192.168.0.42:123'}}
             {'Type': 'syslog', 'Config': {'syslog-address': 'tcp://192.168.0.42:123'}}
         )
         )
 
 
+    def test_stop_grace_period(self):
+        self.mock_client.api_version = '1.25'
+        self.mock_client.create_host_config.return_value = {}
+        service = Service(
+            'foo',
+            image='foo',
+            client=self.mock_client,
+            stop_grace_period="1m35s")
+        opts = service._get_container_create_options({'image': 'foo'}, 1)
+        self.assertEqual(opts['stop_timeout'], 95)
+
+    def test_split_domainname_none(self):
+        service = Service(
+            'foo',
+            image='foo',
+            hostname='name.domain.tld',
+            client=self.mock_client)
+        opts = service._get_container_create_options({'image': 'foo'}, 1)
+        self.assertEqual(opts['hostname'], 'name.domain.tld', 'hostname')
+        self.assertFalse('domainname' in opts, 'domainname')
+
     def test_split_domainname_fqdn(self):
     def test_split_domainname_fqdn(self):
+        self.mock_client.api_version = '1.22'
         service = Service(
         service = Service(
             'foo',
             'foo',
             hostname='name.domain.tld',
             hostname='name.domain.tld',
@@ -243,6 +261,7 @@ class ServiceTest(unittest.TestCase):
         self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
         self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
 
 
     def test_split_domainname_both(self):
     def test_split_domainname_both(self):
+        self.mock_client.api_version = '1.22'
         service = Service(
         service = Service(
             'foo',
             'foo',
             hostname='name',
             hostname='name',
@@ -254,6 +273,7 @@ class ServiceTest(unittest.TestCase):
         self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
         self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
 
 
     def test_split_domainname_weird(self):
     def test_split_domainname_weird(self):
+        self.mock_client.api_version = '1.22'
         service = Service(
         service = Service(
             'foo',
             'foo',
             hostname='name.sub',
             hostname='name.sub',
@@ -478,6 +498,8 @@ class ServiceTest(unittest.TestCase):
             network_mode=None,
             network_mode=None,
             target=None,
             target=None,
             shmsize=None,
             shmsize=None,
+            extra_hosts=None,
+            container_limits={'memory': None},
         )
         )
 
 
     def test_ensure_image_exists_no_build(self):
     def test_ensure_image_exists_no_build(self):
@@ -518,7 +540,9 @@ class ServiceTest(unittest.TestCase):
             cache_from=None,
             cache_from=None,
             network_mode=None,
             network_mode=None,
             target=None,
             target=None,
-            shmsize=None
+            shmsize=None,
+            extra_hosts=None,
+            container_limits={'memory': None},
         )
         )
 
 
     def test_build_does_not_pull(self):
     def test_build_does_not_pull(self):
@@ -857,6 +881,7 @@ class ServiceVolumesTest(unittest.TestCase):
 
 
     def setUp(self):
     def setUp(self):
         self.mock_client = mock.create_autospec(docker.APIClient)
         self.mock_client = mock.create_autospec(docker.APIClient)
+        self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
 
 
     def test_build_volume_binding(self):
     def test_build_volume_binding(self):
         binding = build_volume_binding(VolumeSpec.parse('/outside:/inside', True))
         binding = build_volume_binding(VolumeSpec.parse('/outside:/inside', True))
@@ -914,7 +939,7 @@ class ServiceVolumesTest(unittest.TestCase):
             VolumeSpec.parse('imagedata:/mnt/image/data:rw'),
             VolumeSpec.parse('imagedata:/mnt/image/data:rw'),
         ]
         ]
 
 
-        volumes = get_container_data_volumes(container, options, ['/dev/tmpfs'])
+        volumes, _ = get_container_data_volumes(container, options, ['/dev/tmpfs'], [])
         assert sorted(volumes) == sorted(expected)
         assert sorted(volumes) == sorted(expected)
 
 
     def test_merge_volume_bindings(self):
     def test_merge_volume_bindings(self):
@@ -950,7 +975,7 @@ class ServiceVolumesTest(unittest.TestCase):
             'existingvolume:/existing/volume:rw',
             'existingvolume:/existing/volume:rw',
         ]
         ]
 
 
-        binds, affinity = merge_volume_bindings(options, ['/dev/tmpfs'], previous_container)
+        binds, affinity = merge_volume_bindings(options, ['/dev/tmpfs'], previous_container, [])
         assert sorted(binds) == sorted(expected)
         assert sorted(binds) == sorted(expected)
         assert affinity == {'affinity:container': '=cdefab'}
         assert affinity == {'affinity:container': '=cdefab'}
 
 
@@ -1110,8 +1135,8 @@ class ServiceSecretTest(unittest.TestCase):
         )
         )
         volumes = service.get_secret_volumes()
         volumes = service.get_secret_volumes()
 
 
-        assert volumes[0].external == secret1['file']
-        assert volumes[0].internal == '{}/{}'.format(SECRETS_PATH, secret1['secret'].target)
+        assert volumes[0].source == secret1['file']
+        assert volumes[0].target == '{}/{}'.format(SECRETS_PATH, secret1['secret'].target)
 
 
     def test_get_secret_volumes_abspath(self):
     def test_get_secret_volumes_abspath(self):
         secret1 = {
         secret1 = {
@@ -1126,8 +1151,8 @@ class ServiceSecretTest(unittest.TestCase):
         )
         )
         volumes = service.get_secret_volumes()
         volumes = service.get_secret_volumes()
 
 
-        assert volumes[0].external == secret1['file']
-        assert volumes[0].internal == secret1['secret'].target
+        assert volumes[0].source == secret1['file']
+        assert volumes[0].target == secret1['secret'].target
 
 
     def test_get_secret_volumes_no_target(self):
     def test_get_secret_volumes_no_target(self):
         secret1 = {
         secret1 = {
@@ -1142,5 +1167,5 @@ class ServiceSecretTest(unittest.TestCase):
         )
         )
         volumes = service.get_secret_volumes()
         volumes = service.get_secret_volumes()
 
 
-        assert volumes[0].external == secret1['file']
-        assert volumes[0].internal == '{}/{}'.format(SECRETS_PATH, secret1['secret'].source)
+        assert volumes[0].source == secret1['file']
+        assert volumes[0].target == '{}/{}'.format(SECRETS_PATH, secret1['secret'].source)
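Two behavioural notes from the updated unit tests: stop_grace_period strings such as '1m35s' are converted to whole seconds and passed to the engine as stop_timeout (95) once the client reports a new enough api_version (the test pins 1.25), and secret file mounts are now described by source/target attributes rather than the old external/internal pair, matching the mount-style volume representation used elsewhere in this release.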

+ 0 - 1
tox.ini

@@ -18,7 +18,6 @@ deps =
     -rrequirements-dev.txt
 commands =
     py.test -v \
-        --full-trace \
         --cov=compose \
         --cov-report html \
         --cov-report term \