Quellcode durchsuchen

Merge pull request #5602 from docker/bump-1.19.0-rc1

Bump 1.19.0-rc1
Joffrey F vor 7 Jahren
Ursprung
Commit
a9ac6778df
65 geänderte Dateien mit 2538 neuen und 1436 gelöschten Zeilen
  1. 62 0
      .circleci/config.yml
  2. 0 30
      .travis.yml
  3. 87 0
      CHANGELOG.md
  4. 7 5
      Dockerfile
  5. 5 3
      Dockerfile.armhf
  6. 0 32
      ROADMAP.md
  7. 1 1
      compose/__init__.py
  8. 20 1
      compose/cli/command.py
  9. 11 2
      compose/cli/docker_client.py
  10. 162 41
      compose/cli/main.py
  11. 46 5
      compose/config/config.py
  12. 16 1
      compose/config/config_schema_v1.json
  13. 18 2
      compose/config/config_schema_v2.0.json
  14. 21 5
      compose/config/config_schema_v2.1.json
  15. 21 5
      compose/config/config_schema_v2.2.json
  16. 22 5
      compose/config/config_schema_v2.3.json
  17. 19 4
      compose/config/config_schema_v3.0.json
  18. 20 5
      compose/config/config_schema_v3.1.json
  19. 20 5
      compose/config/config_schema_v3.2.json
  20. 22 7
      compose/config/config_schema_v3.3.json
  21. 22 7
      compose/config/config_schema_v3.4.json
  22. 22 7
      compose/config/config_schema_v3.5.json
  23. 102 14
      compose/config/interpolation.py
  24. 5 1
      compose/config/serialize.py
  25. 45 7
      compose/config/types.py
  26. 1 0
      compose/const.py
  27. 25 7
      compose/container.py
  28. 5 1
      compose/network.py
  29. 24 5
      compose/parallel.py
  30. 32 26
      compose/project.py
  31. 41 19
      compose/service.py
  32. 58 26
      contrib/completion/bash/docker-compose
  33. 1 1
      requirements.txt
  34. 27 0
      script/circle/bintray-deploy.sh
  35. 2 0
      script/release/download-binaries
  36. 1 1
      script/run/run.sh
  37. 0 34
      script/setup/osx
  38. 0 29
      script/travis/bintray.json.tmpl
  39. 0 13
      script/travis/build-binary
  40. 0 10
      script/travis/ci
  41. 0 10
      script/travis/install
  42. 0 13
      script/travis/render-bintray-config.py
  43. 1 1
      setup.py
  44. 219 197
      tests/acceptance/cli_test.py
  45. 6 0
      tests/fixtures/ps-services-filter/docker-compose.yml
  46. 0 0
      tests/fixtures/tls/key.pem
  47. 260 118
      tests/integration/project_test.py
  48. 7 5
      tests/integration/resilience_test.py
  49. 297 185
      tests/integration/service_test.py
  50. 52 49
      tests/integration/state_test.py
  51. 12 0
      tests/integration/testcases.py
  52. 1 1
      tests/unit/bundle_test.py
  53. 24 4
      tests/unit/cli/docker_client_test.py
  54. 0 1
      tests/unit/cli/formatter_test.py
  55. 10 0
      tests/unit/cli/main_test.py
  56. 4 4
      tests/unit/cli/verbose_proxy_test.py
  57. 14 17
      tests/unit/cli_test.py
  58. 308 196
      tests/unit/config/config_test.py
  59. 77 2
      tests/unit/config/interpolation_test.py
  60. 33 41
      tests/unit/container_test.py
  61. 109 85
      tests/unit/parallel_test.py
  62. 6 6
      tests/unit/progress_stream_test.py
  63. 24 32
      tests/unit/project_test.py
  64. 79 100
      tests/unit/service_test.py
  65. 2 2
      tests/unit/split_buffer_test.py

+ 62 - 0
.circleci/config.yml

@@ -0,0 +1,62 @@
+version: 2
+jobs:
+  test:
+    macos:
+      xcode: "8.3.3"
+    steps:
+    - checkout
+#    - run:
+#        name: install python3
+#        command: brew install python3
+    - run:
+        name: install tox
+        command: sudo pip install --upgrade tox==2.1.1
+    - run:
+        name: unit tests
+        command: tox -e py27 -- tests/unit
+
+  build-osx-binary:
+    macos:
+      xcode: "8.3.3"
+    steps:
+      - checkout
+      - run:
+          name: upgrade python tools
+          command: sudo pip install --upgrade pip virtualenv
+      - run:
+         name: setup script
+         command: ./script/setup/osx
+      - run:
+         name: build script
+         command: ./script/build/osx
+      - store_artifacts:
+          path: dist/docker-compose-Darwin-x86_64
+          destination: docker-compose-Darwin-x86_64
+      - deploy:
+          name: Deploy binary to bintray
+          command: |
+            OS_NAME=Darwin PKG_NAME=osx ./script/circle/bintray-deploy.sh
+
+
+  build-linux-binary:
+    machine:
+      enabled: true
+    steps:
+      - checkout
+      - run:
+          name: build Linux binary
+          command: ./script/build/linux
+      - store_artifacts:
+          path: dist/docker-compose-Linux-x86_64
+          destination: docker-compose-Linux-x86_64
+      - deploy:
+          name: Deploy binary to bintray
+          command: |
+            OS_NAME=Linux PKG_NAME=linux ./script/circle/bintray-deploy.sh
+workflows:
+  version: 2
+  all:
+    jobs:
+      - test
+      - build-linux-binary
+      - build-osx-binary

+ 0 - 30
.travis.yml

@@ -1,30 +0,0 @@
-sudo: required
-
-language: python
-
-matrix:
-  include:
-    - os: linux
-      services:
-      - docker
-    - os: osx
-      osx_image: xcode7.3
-      language: generic
-
-install: ./script/travis/install
-
-script:
-  - ./script/travis/ci
-  - ./script/travis/build-binary
-
-before_deploy:
-  - "./script/travis/render-bintray-config.py < ./script/travis/bintray.json.tmpl > ./bintray.json"
-
-deploy:
-  provider: bintray
-  user: docker-compose-roleuser
-  key: '$BINTRAY_API_KEY'
-  file: ./bintray.json
-  skip_cleanup: true
-  on:
-    all_branches: true

+ 87 - 0
CHANGELOG.md

@@ -1,6 +1,93 @@
 Change log
 ==========
 
+1.19.0 (2018-01-31)
+-------------------
+
+### Breaking changes
+
+- On UNIX platforms, interactive `run` and `exec` commands now require
+  the `docker` CLI to be installed on the client by default. To revert
+  to the previous behavior, users may set the `COMPOSE_INTERACTIVE_NO_CLI`
+  environment variable.
+
+### New features
+
+#### Compose file version 3.x
+
+- The output of the `config` command should now merge `deploy` options from
+  several Compose files in a more accurate manner
+
+#### Compose file version 2.3
+
+- Added support for the `runtime` option in service definitions
+
+#### Compose file version 2.1 and up
+
+- Added support for the `${VAR:?err}` and `${VAR?err}` variable interpolation
+  syntax to indicate mandatory variables
+
+#### Compose file version 2.x
+
+- Added `priority` key to service network mappings, allowing the user to
+  define in which order the specified service will connect to each network
+
+#### All formats
+
+- Added `--renew-anon-volumes` (shorthand `-V`) to the `up` command,
+  preventing Compose from recovering volume data from previous containers for
+  anonymous volumes
+
+- Added limit for number of simultaneous parallel operations, which should
+  prevent accidental resource exhaustion of the server. Default is 64 and
+  can be configured using the `COMPOSE_PARALLEL_LIMIT` environment variable
+
+- Added `--always-recreate-deps` flag to the `up` command to force recreating
+  dependent services along with the dependency owner
+
+- Added `COMPOSE_IGNORE_ORPHANS` environment variable to forgo orphan
+  container detection and suppress warnings
+
+- Added `COMPOSE_FORCE_WINDOWS_HOST` environment variable to force Compose
+  to parse volume definitions as if the Docker host was a Windows system,
+  even if Compose itself is currently running on UNIX
+
+- Bash completion should now be able to better differentiate between running,
+  stopped and paused services
+
+### Bugfixes
+
+- Fixed a bug that would cause the `build` command to report a connection
+  error when the build context contained unreadable files or FIFO objects.
+  These file types will now be handled appropriately
+
+- Fixed various issues around interactive `run`/`exec` sessions.
+
+- Fixed a bug where setting TLS options with environment and CLI flags
+  simultaneously would result in part of the configuration being ignored
+
+- Fixed a bug where the `-d` and `--timeout` flags in `up` were erroneously
+  marked as incompatible
+
+- Fixed a bug where the recreation of a service would break if the image
+  associated with the previous container had been removed
+
+- Fixed a bug where `tmpfs` volumes declared using the extended syntax in
+  Compose files using version 3.2 would be erroneously created as anonymous
+  volumes instead
+
+- Fixed a bug where type conversion errors would print a stacktrace instead
+  of exiting gracefully
+
+- Fixed some errors related to unicode handling
+
+- Dependent services no longer get recreated along with the dependency owner
+  if their configuration hasn't changed
+
+- Added better validation of `labels` fields in Compose files. Label values
+  containing scalar types (number, boolean) now get automatically converted
+  to strings
+
 1.18.0 (2017-12-15)
 -------------------
 

+ 7 - 5
Dockerfile

@@ -17,11 +17,13 @@ RUN set -ex; \
     ; \
     rm -rf /var/lib/apt/lists/*
 
-RUN curl https://get.docker.com/builds/Linux/x86_64/docker-1.8.3 \
-        -o /usr/local/bin/docker && \
-    SHA256=f024bc65c45a3778cf07213d26016075e8172de8f6e4b5702bedde06c241650f; \
-    echo "${SHA256}  /usr/local/bin/docker" | sha256sum -c - && \
-    chmod +x /usr/local/bin/docker
+RUN curl -fsSL -o dockerbins.tgz "https://download.docker.com/linux/static/stable/x86_64/docker-17.12.0-ce.tgz" && \
+    SHA256=692e1c72937f6214b1038def84463018d8e320c8eaf8530546c84c2f8f9c767d; \
+    echo "${SHA256}  dockerbins.tgz" | sha256sum -c - && \
+    tar xvf dockerbins.tgz docker/docker --strip-components 1 && \
+    mv docker /usr/local/bin/docker && \
+    chmod +x /usr/local/bin/docker && \
+    rm dockerbins.tgz
 
 # Build Python 2.7.13 from source
 RUN set -ex; \

+ 5 - 3
Dockerfile.armhf

@@ -17,9 +17,11 @@ RUN set -ex; \
     ; \
     rm -rf /var/lib/apt/lists/*
 
-RUN curl https://get.docker.com/builds/Linux/armel/docker-1.8.3 \
-        -o /usr/local/bin/docker && \
-    chmod +x /usr/local/bin/docker
+RUN curl -fsSL -o dockerbins.tgz "https://download.docker.com/linux/static/stable/armhf/docker-17.12.0-ce.tgz" && \
+    tar xvf dockerbins.tgz docker/docker --strip-components 1 && \
+    mv docker /usr/local/bin/docker && \
+    chmod +x /usr/local/bin/docker && \
+    rm dockerbins.tgz
 
 # Build Python 2.7.13 from source
 RUN set -ex; \

+ 0 - 32
ROADMAP.md

@@ -1,32 +0,0 @@
-# Roadmap
-
-## An even better tool for development environments
-
-Compose is a great tool for development environments, but it could be even better. For example:
-
-- It should be possible to define hostnames for containers which work from the host machine, e.g. “mywebcontainer.local”. This is needed by apps comprising multiple web services which generate links to one another (e.g. a frontend website and a separate admin webapp)
-
-## More than just development environments
-
-Compose currently works really well in development, but we want to make the Compose file format better for test, staging, and production environments. To support these use cases, there will need to be improvements to the file format, improvements to the command-line tool, integrations with other tools, and perhaps new tools altogether.
-
-Some specific things we are considering:
-
-- Compose currently will attempt to get your application into the correct state when running `up`, but it has a number of shortcomings:
-  - It should roll back to a known good state if it fails.
-  - It should allow a user to check the actions it is about to perform before running them.
-- It should be possible to partially modify the config file for different environments (dev/test/staging/prod), passing in e.g. custom ports, volume mount paths, or volume drivers. ([#1377](https://github.com/docker/compose/issues/1377))
-- Compose should recommend a technique for zero-downtime deploys. ([#1786](https://github.com/docker/compose/issues/1786))
-- It should be possible to continuously attempt to keep an application in the correct state, instead of just performing `up` a single time.
-
-## Integration with Swarm
-
-Compose should integrate really well with Swarm so you can take an application you've developed on your laptop and run it on a Swarm cluster.
-
-The current state of integration is documented in [SWARM.md](SWARM.md).
-
-## Applications spanning multiple teams
-
-Compose works well for applications that are in a single repository and depend on services that are hosted on Docker Hub. If your application depends on another application within your organisation, Compose doesn't work as well.
-
-There are several ideas about how this could work, such as [including external files](https://github.com/docker/fig/issues/318).

+ 1 - 1
compose/__init__.py

@@ -1,4 +1,4 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
-__version__ = '1.18.0'
+__version__ = '1.19.0-rc1'

+ 20 - 1
compose/cli/command.py

@@ -10,6 +10,7 @@ import six
 from . import errors
 from . import verbose_proxy
 from .. import config
+from .. import parallel
 from ..config.environment import Environment
 from ..const import API_VERSIONS
 from ..project import Project
@@ -23,6 +24,8 @@ log = logging.getLogger(__name__)
 
 def project_from_options(project_dir, options):
     environment = Environment.from_env_file(project_dir)
+    set_parallel_limit(environment)
+
     host = options.get('--host')
     if host is not None:
         host = host.lstrip('=')
@@ -32,12 +35,28 @@ def project_from_options(project_dir, options):
         project_name=options.get('--project-name'),
         verbose=options.get('--verbose'),
         host=host,
-        tls_config=tls_config_from_options(options),
+        tls_config=tls_config_from_options(options, environment),
         environment=environment,
         override_dir=options.get('--project-directory'),
     )
 
 
+def set_parallel_limit(environment):
+    parallel_limit = environment.get('COMPOSE_PARALLEL_LIMIT')
+    if parallel_limit:
+        try:
+            parallel_limit = int(parallel_limit)
+        except ValueError:
+            raise errors.UserError(
+                'COMPOSE_PARALLEL_LIMIT must be an integer (found: "{}")'.format(
+                    environment.get('COMPOSE_PARALLEL_LIMIT')
+                )
+            )
+        if parallel_limit <= 1:
+            raise errors.UserError('COMPOSE_PARALLEL_LIMIT can not be less than 2')
+        parallel.GlobalLimit.set_global_limit(parallel_limit)
+
+
 def get_config_from_options(base_dir, options):
     environment = Environment.from_env_file(base_dir)
     config_path = get_config_path_from_options(

+ 11 - 2
compose/cli/docker_client.py

@@ -2,6 +2,7 @@ from __future__ import absolute_import
 from __future__ import unicode_literals
 
 import logging
+import os.path
 import ssl
 
 from docker import APIClient
@@ -35,14 +36,22 @@ def get_tls_version(environment):
 
 
 def tls_config_from_options(options, environment=None):
+    environment = environment or {}
+    cert_path = environment.get('DOCKER_CERT_PATH') or None
+
     tls = options.get('--tls', False)
     ca_cert = unquote_path(options.get('--tlscacert'))
     cert = unquote_path(options.get('--tlscert'))
     key = unquote_path(options.get('--tlskey'))
-    verify = options.get('--tlsverify')
+    verify = options.get('--tlsverify', environment.get('DOCKER_TLS_VERIFY'))
     skip_hostname_check = options.get('--skip-hostname-check', False)
+    if cert_path is not None and not any((ca_cert, cert, key)):
+        # FIXME: Modify TLSConfig to take a cert_path argument and do this internally
+        cert = os.path.join(cert_path, 'cert.pem')
+        key = os.path.join(cert_path, 'key.pem')
+        ca_cert = os.path.join(cert_path, 'ca.pem')
 
-    tls_version = get_tls_version(environment or {})
+    tls_version = get_tls_version(environment)
 
     advanced_opts = any([ca_cert, cert, key, verify, tls_version])
 

+ 162 - 41
compose/cli/main.py

@@ -233,7 +233,7 @@ class TopLevelCommand(object):
             --force-rm              Always remove intermediate containers.
             --no-cache              Do not use cache when building the image.
             --pull                  Always attempt to pull a newer version of the image.
-            -m, --memory MEM        Sets memory limit for the bulid container.
+            -m, --memory MEM        Sets memory limit for the build container.
             --build-arg key=val     Set build-time variables for one service.
         """
         service_names = options['SERVICE']
@@ -377,9 +377,20 @@ class TopLevelCommand(object):
             -t, --timeout TIMEOUT   Specify a shutdown timeout in seconds.
                                     (default: 10)
         """
+        environment = Environment.from_env_file(self.project_dir)
+        ignore_orphans = environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
+
+        if ignore_orphans and options['--remove-orphans']:
+            raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
+
         image_type = image_type_from_opt('--rmi', options['--rmi'])
         timeout = timeout_from_opts(options)
-        self.project.down(image_type, options['--volumes'], options['--remove-orphans'], timeout=timeout)
+        self.project.down(
+            image_type,
+            options['--volumes'],
+            options['--remove-orphans'],
+            timeout=timeout,
+            ignore_orphans=ignore_orphans)
 
     def events(self, options):
         """
@@ -423,6 +434,8 @@ class TopLevelCommand(object):
             -e, --env KEY=VAL Set environment variables (can be used multiple times,
                               not supported in API < 1.25)
         """
+        environment = Environment.from_env_file(self.project_dir)
+        use_cli = not environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
         index = int(options.get('--index'))
         service = self.project.get_service(options['SERVICE'])
         detach = options['-d']
@@ -437,14 +450,14 @@ class TopLevelCommand(object):
         command = [options['COMMAND']] + options['ARGS']
         tty = not options["-T"]
 
-        if IS_WINDOWS_PLATFORM and not detach:
+        if IS_WINDOWS_PLATFORM or use_cli and not detach:
             sys.exit(call_docker(build_exec_command(options, container.id, command)))
 
         create_exec_options = {
             "privileged": options["--privileged"],
             "user": options["--user"],
             "tty": tty,
-            "stdin": tty,
+            "stdin": True,
         }
 
         if docker.utils.version_gte(self.project.client.api_version, '1.25'):
@@ -611,8 +624,21 @@ class TopLevelCommand(object):
         Usage: ps [options] [SERVICE...]
 
         Options:
-            -q    Only display IDs
+            -q                   Only display IDs
+            --services           Display services
+            --filter KEY=VAL     Filter services by a property
         """
+        if options['-q'] and options['--services']:
+            raise UserError('-q and --services cannot be combined')
+
+        if options['--services']:
+            filt = build_filter(options.get('--filter'))
+            services = self.project.services
+            if filt:
+                services = filter_services(filt, services, self.project)
+            print('\n'.join(service.name for service in services))
+            return
+
         containers = sorted(
             self.project.containers(service_names=options['SERVICE'], stopped=True) +
             self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
@@ -768,7 +794,7 @@ class TopLevelCommand(object):
             command = service.options.get('command')
 
         container_options = build_container_options(options, detach, command)
-        run_one_off_container(container_options, self.project, service, options)
+        run_one_off_container(container_options, self.project, service, options, self.project_dir)
 
     def scale(self, options):
         """
@@ -902,31 +928,34 @@ class TopLevelCommand(object):
         Options:
             -d                         Detached mode: Run containers in the background,
                                        print new container names. Incompatible with
-                                       --abort-on-container-exit and --timeout.
+                                       --abort-on-container-exit.
             --no-color                 Produce monochrome output.
             --no-deps                  Don't start linked services.
             --force-recreate           Recreate containers even if their configuration
                                        and image haven't changed.
+            --always-recreate-deps     Recreate dependent containers.
                                        Incompatible with --no-recreate.
-            --no-recreate              If containers already exist, don't recreate them.
-                                       Incompatible with --force-recreate.
+            --no-recreate              If containers already exist, don't recreate
+                                       them. Incompatible with --force-recreate and -V.
             --no-build                 Don't build an image, even if it's missing.
             --no-start                 Don't start the services after creating them.
             --build                    Build images before starting containers.
-            --abort-on-container-exit  Stops all containers if any container was stopped.
-                                       Incompatible with -d.
-            -t, --timeout TIMEOUT      Use this timeout in seconds for container shutdown
-                                       when attached or when containers are already.
-                                       Incompatible with -d.
-                                       running. (default: 10)
-            --remove-orphans           Remove containers for services not
-                                       defined in the Compose file
-            --exit-code-from SERVICE   Return the exit code of the selected service container.
-                                       Implies --abort-on-container-exit.
-            --scale SERVICE=NUM        Scale SERVICE to NUM instances. Overrides the `scale`
-                                       setting in the Compose file if present.
+            --abort-on-container-exit  Stops all containers if any container was
+                                       stopped. Incompatible with -d.
+            -t, --timeout TIMEOUT      Use this timeout in seconds for container
+                                       shutdown when attached or when containers are
+                                       already running. (default: 10)
+            -V, --renew-anon-volumes   Recreate anonymous volumes instead of retrieving
+                                       data from the previous containers.
+            --remove-orphans           Remove containers for services not defined
+                                       in the Compose file.
+            --exit-code-from SERVICE   Return the exit code of the selected service
+                                       container. Implies --abort-on-container-exit.
+            --scale SERVICE=NUM        Scale SERVICE to NUM instances. Overrides the
+                                       `scale` setting in the Compose file if present.
         """
         start_deps = not options['--no-deps']
+        always_recreate_deps = options['--always-recreate-deps']
         exit_value_from = exitval_from_opts(options, self.project)
         cascade_stop = options['--abort-on-container-exit']
         service_names = options['SERVICE']
@@ -938,26 +967,49 @@ class TopLevelCommand(object):
         if detached and (cascade_stop or exit_value_from):
             raise UserError("--abort-on-container-exit and -d cannot be combined.")
 
-        if detached and timeout:
-            raise UserError("-d and --timeout cannot be combined.")
+        environment = Environment.from_env_file(self.project_dir)
+        ignore_orphans = environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
+
+        if ignore_orphans and remove_orphans:
+            raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
 
-        if no_start:
-            for excluded in ['-d', '--abort-on-container-exit', '--exit-code-from']:
-                if options.get(excluded):
-                    raise UserError('--no-start and {} cannot be combined.'.format(excluded))
+        opts = ['-d', '--abort-on-container-exit', '--exit-code-from']
+        for excluded in [x for x in opts if options.get(x) and no_start]:
+            raise UserError('--no-start and {} cannot be combined.'.format(excluded))
 
         with up_shutdown_context(self.project, service_names, timeout, detached):
-            to_attach = self.project.up(
-                service_names=service_names,
-                start_deps=start_deps,
-                strategy=convergence_strategy_from_opts(options),
-                do_build=build_action_from_opts(options),
-                timeout=timeout,
-                detached=detached,
-                remove_orphans=remove_orphans,
-                scale_override=parse_scale_args(options['--scale']),
-                start=not no_start
-            )
+            warn_for_swarm_mode(self.project.client)
+
+            def up(rebuild):
+                return self.project.up(
+                    service_names=service_names,
+                    start_deps=start_deps,
+                    strategy=convergence_strategy_from_opts(options),
+                    do_build=build_action_from_opts(options),
+                    timeout=timeout,
+                    detached=detached,
+                    remove_orphans=remove_orphans,
+                    ignore_orphans=ignore_orphans,
+                    scale_override=parse_scale_args(options['--scale']),
+                    start=not no_start,
+                    always_recreate_deps=always_recreate_deps,
+                    reset_container_image=rebuild,
+                    renew_anonymous_volumes=options.get('--renew-anon-volumes')
+                )
+
+            try:
+                to_attach = up(False)
+            except docker.errors.ImageNotFound as e:
+                log.error(
+                    "The image for the service you're trying to recreate has been removed. "
+                    "If you continue, volume data could be lost. Consider backing up your data "
+                    "before continuing.\n".format(e.explanation)
+                )
+                res = yesno("Continue with the new image? [yN]", False)
+                if res is None or not res:
+                    raise e
+
+                to_attach = up(True)
 
             if detached or no_start:
                 return
@@ -1034,10 +1086,14 @@ def compute_exit_code(exit_value_from, attached_containers, cascade_starter, all
 def convergence_strategy_from_opts(options):
     no_recreate = options['--no-recreate']
     force_recreate = options['--force-recreate']
+    renew_anonymous_volumes = options.get('--renew-anon-volumes')
     if force_recreate and no_recreate:
         raise UserError("--force-recreate and --no-recreate cannot be combined.")
 
-    if force_recreate:
+    if no_recreate and renew_anonymous_volumes:
+        raise UserError('--no-recreate and --renew-anon-volumes cannot be combined.')
+
+    if force_recreate or renew_anonymous_volumes:
         return ConvergenceStrategy.always
 
     if no_recreate:
@@ -1169,7 +1225,7 @@ def build_container_options(options, detach, command):
     return container_options
 
 
-def run_one_off_container(container_options, project, service, options):
+def run_one_off_container(container_options, project, service, options, project_dir='.'):
     if not options['--no-deps']:
         deps = service.get_dependency_names()
         if deps:
@@ -1196,10 +1252,13 @@ def run_one_off_container(container_options, project, service, options):
         if options['--rm']:
             project.client.remove_container(container.id, force=True, v=True)
 
+    environment = Environment.from_env_file(project_dir)
+    use_cli = not environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
+
     signals.set_signal_handler_to_shutdown()
     try:
         try:
-            if IS_WINDOWS_PLATFORM:
+            if IS_WINDOWS_PLATFORM or use_cli:
                 service.connect_container_to_networks(container)
                 exit_code = call_docker(["start", "--attach", "--interactive", container.id])
             else:
@@ -1331,3 +1390,65 @@ def build_exec_command(options, container_id, command):
     args += [container_id]
     args += command
     return args
+
+
+def has_container_with_state(containers, state):
+    states = {
+        'running': lambda c: c.is_running,
+        'stopped': lambda c: not c.is_running,
+        'paused': lambda c: c.is_paused,
+        'restarting': lambda c: c.is_restarting,
+    }
+    for container in containers:
+        if state not in states:
+            raise UserError("Invalid state: %s" % state)
+        if states[state](container):
+            return True
+
+
+def filter_services(filt, services, project):
+    def should_include(service):
+        for f in filt:
+            if f == 'status':
+                state = filt[f]
+                containers = project.containers([service.name], stopped=True)
+                if not has_container_with_state(containers, state):
+                    return False
+            elif f == 'source':
+                source = filt[f]
+                if source == 'image' or source == 'build':
+                    if source not in service.options:
+                        return False
+                else:
+                    raise UserError("Invalid value for source filter: %s" % source)
+            else:
+                raise UserError("Invalid filter: %s" % f)
+        return True
+
+    return filter(should_include, services)
+
+
+def build_filter(arg):
+    filt = {}
+    if arg is not None:
+        if '=' not in arg:
+            raise UserError("Arguments to --filter should be in form KEY=VAL")
+        key, val = arg.split('=', 1)
+        filt[key] = val
+    return filt
+
+
+def warn_for_swarm_mode(client):
+    info = client.info()
+    if info.get('Swarm', {}).get('LocalNodeState') == 'active':
+        if info.get('ServerVersion', '').startswith('ucp'):
+            # UCP does multi-node scheduling with traditional Compose files.
+            return
+
+        log.warn(
+            "The Docker Engine you're using is running in swarm mode.\n\n"
+            "Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
+            "All containers will be scheduled on the current node.\n\n"
+            "To deploy your application across the swarm, "
+            "use `docker stack deploy`.\n"
+        )

+ 46 - 5
compose/config/config.py

@@ -19,6 +19,7 @@ from ..const import COMPOSEFILE_V2_1 as V2_1
 from ..const import COMPOSEFILE_V3_0 as V3_0
 from ..const import COMPOSEFILE_V3_4 as V3_4
 from ..utils import build_string_dict
+from ..utils import json_hash
 from ..utils import parse_bytes
 from ..utils import parse_nanoseconds_int
 from ..utils import splitdrive
@@ -98,6 +99,7 @@ DOCKER_CONFIG_KEYS = [
     'privileged',
     'read_only',
     'restart',
+    'runtime',
     'secrets',
     'security_opt',
     'shm_size',
@@ -814,11 +816,12 @@ def finalize_service_volumes(service_dict, environment):
     if 'volumes' in service_dict:
         finalized_volumes = []
         normalize = environment.get_boolean('COMPOSE_CONVERT_WINDOWS_PATHS')
+        win_host = environment.get_boolean('COMPOSE_FORCE_WINDOWS_HOST')
         for v in service_dict['volumes']:
             if isinstance(v, dict):
-                finalized_volumes.append(MountSpec.parse(v, normalize))
+                finalized_volumes.append(MountSpec.parse(v, normalize, win_host))
             else:
-                finalized_volumes.append(VolumeSpec.parse(v, normalize))
+                finalized_volumes.append(VolumeSpec.parse(v, normalize, win_host))
         service_dict['volumes'] = finalized_volumes
 
     return service_dict
@@ -920,10 +923,14 @@ class MergeDict(dict):
             self.base.get(field, default),
             self.override.get(field, default))
 
-    def merge_mapping(self, field, parse_func):
+    def merge_mapping(self, field, parse_func=None):
         if not self.needs_merge(field):
             return
 
+        if parse_func is None:
+            def parse_func(m):
+                return m or {}
+
         self[field] = parse_func(self.base.get(field))
         self[field].update(parse_func(self.override.get(field)))
 
@@ -955,7 +962,6 @@ def merge_service_dicts(base, override, version):
     md.merge_sequence('links', ServiceLink.parse)
     md.merge_sequence('secrets', types.ServiceSecret.parse)
     md.merge_sequence('configs', types.ServiceConfig.parse)
-    md.merge_mapping('deploy', parse_deploy)
     md.merge_mapping('extra_hosts', parse_extra_hosts)
 
     for field in ['volumes', 'devices']:
@@ -974,6 +980,7 @@ def merge_service_dicts(base, override, version):
     merge_ports(md, base, override)
     md.merge_field('blkio_config', merge_blkio_config, default={})
     md.merge_field('healthcheck', merge_healthchecks, default={})
+    md.merge_field('deploy', merge_deploy, default={})
 
     for field in set(ALLOWED_KEYS) - set(md):
         md.merge_scalar(field)
@@ -1037,6 +1044,41 @@ def merge_build(output, base, override):
     return dict(md)
 
 
+def merge_deploy(base, override):
+    md = MergeDict(base or {}, override or {})
+    md.merge_scalar('mode')
+    md.merge_scalar('endpoint_mode')
+    md.merge_scalar('replicas')
+    md.merge_mapping('labels', parse_labels)
+    md.merge_mapping('update_config')
+    md.merge_mapping('restart_policy')
+    if md.needs_merge('resources'):
+        resources_md = MergeDict(md.base.get('resources') or {}, md.override.get('resources') or {})
+        resources_md.merge_mapping('limits')
+        resources_md.merge_field('reservations', merge_reservations, default={})
+        md['resources'] = dict(resources_md)
+    if md.needs_merge('placement'):
+        placement_md = MergeDict(md.base.get('placement') or {}, md.override.get('placement') or {})
+        placement_md.merge_field('constraints', merge_unique_items_lists, default=[])
+        placement_md.merge_field('preferences', merge_unique_objects_lists, default=[])
+        md['placement'] = dict(placement_md)
+
+    return dict(md)
+
+
+def merge_reservations(base, override):
+    md = MergeDict(base, override)
+    md.merge_scalar('cpus')
+    md.merge_scalar('memory')
+    md.merge_sequence('generic_resources', types.GenericResource.parse)
+    return dict(md)
+
+
+def merge_unique_objects_lists(base, override):
+    result = dict((json_hash(i), i) for i in base + override)
+    return [i[1] for i in sorted([(k, v) for k, v in result.items()], key=lambda x: x[0])]
+
+
 def merge_blkio_config(base, override):
     md = MergeDict(base, override)
     md.merge_scalar('weight')
@@ -1123,7 +1165,6 @@ parse_sysctls = functools.partial(parse_dict_or_list, split_kv, 'sysctls')
 parse_depends_on = functools.partial(
     parse_dict_or_list, lambda k: (k, {'condition': 'service_started'}), 'depends_on'
 )
-parse_deploy = functools.partial(parse_dict_or_list, split_kv, 'deploy')
 
 
 def parse_flat_dict(d):

+ 16 - 1
compose/config/config_schema_v1.json

@@ -78,7 +78,7 @@
         "hostname": {"type": "string"},
         "image": {"type": "string"},
         "ipc": {"type": "string"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
         "log_driver": {"type": "string"},
         "log_opt": {"type": "object"},
@@ -166,6 +166,21 @@
       ]
     },
 
+    "labels": {
+      "oneOf": [
+        {
+          "type": "object",
+          "patternProperties": {
+            ".+": {
+              "type": "string"
+            }
+          },
+          "additionalProperties": false
+        },
+        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+      ]
+    },
+
     "constraints": {
       "service": {
         "id": "#/definitions/constraints/service",

+ 18 - 2
compose/config/config_schema_v2.0.json

@@ -158,7 +158,7 @@
         "hostname": {"type": "string"},
         "image": {"type": "string"},
         "ipc": {"type": "string"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
 
         "logging": {
@@ -191,7 +191,8 @@
                       "properties": {
                         "aliases": {"$ref": "#/definitions/list_of_strings"},
                         "ipv4_address": {"type": "string"},
-                        "ipv6_address": {"type": "string"}
+                        "ipv6_address": {"type": "string"},
+                        "priority": {"type": "number"}
                       },
                       "additionalProperties": false
                     },
@@ -354,6 +355,21 @@
       ]
     },
 
+    "labels": {
+      "oneOf": [
+        {
+          "type": "object",
+          "patternProperties": {
+            ".+": {
+              "type": "string"
+            }
+          },
+          "additionalProperties": false
+        },
+        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+      ]
+    },
+
     "blkio_limit": {
       "type": "object",
       "properties": {

+ 21 - 5
compose/config/config_schema_v2.1.json

@@ -88,7 +88,7 @@
                 "context": {"type": "string"},
                 "dockerfile": {"type": "string"},
                 "args": {"$ref": "#/definitions/list_or_dict"},
-                "labels": {"$ref": "#/definitions/list_or_dict"}
+                "labels": {"$ref": "#/definitions/labels"}
               },
               "additionalProperties": false
             }
@@ -183,7 +183,7 @@
         "image": {"type": "string"},
         "ipc": {"type": "string"},
         "isolation": {"type": "string"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
 
         "logging": {
@@ -217,7 +217,8 @@
                         "aliases": {"$ref": "#/definitions/list_of_strings"},
                         "ipv4_address": {"type": "string"},
                         "ipv6_address": {"type": "string"},
-                        "link_local_ips": {"$ref": "#/definitions/list_of_strings"}
+                        "link_local_ips": {"$ref": "#/definitions/list_of_strings"},
+                        "priority": {"type": "number"}
                       },
                       "additionalProperties": false
                     },
@@ -350,7 +351,7 @@
         },
         "internal": {"type": "boolean"},
         "enable_ipv6": {"type": "boolean"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "name": {"type": "string"}
       },
       "additionalProperties": false
@@ -374,7 +375,7 @@
           },
           "additionalProperties": false
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "name": {"type": "string"}
       },
       "additionalProperties": false
@@ -408,6 +409,21 @@
       ]
     },
 
+    "labels": {
+      "oneOf": [
+        {
+          "type": "object",
+          "patternProperties": {
+            ".+": {
+              "type": "string"
+            }
+          },
+          "additionalProperties": false
+        },
+        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+      ]
+    },
+
     "blkio_limit": {
       "type": "object",
       "properties": {

+ 21 - 5
compose/config/config_schema_v2.2.json

@@ -88,7 +88,7 @@
                 "context": {"type": "string"},
                 "dockerfile": {"type": "string"},
                 "args": {"$ref": "#/definitions/list_or_dict"},
-                "labels": {"$ref": "#/definitions/list_or_dict"},
+                "labels": {"$ref": "#/definitions/labels"},
                 "cache_from": {"$ref": "#/definitions/list_of_strings"},
                 "network": {"type": "string"}
               },
@@ -189,7 +189,7 @@
         "init": {"type": ["boolean", "string"]},
         "ipc": {"type": "string"},
         "isolation": {"type": "string"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
 
         "logging": {
@@ -223,7 +223,8 @@
                         "aliases": {"$ref": "#/definitions/list_of_strings"},
                         "ipv4_address": {"type": "string"},
                         "ipv6_address": {"type": "string"},
-                        "link_local_ips": {"$ref": "#/definitions/list_of_strings"}
+                        "link_local_ips": {"$ref": "#/definitions/list_of_strings"},
+                        "priority": {"type": "number"}
                       },
                       "additionalProperties": false
                     },
@@ -357,7 +358,7 @@
         },
         "internal": {"type": "boolean"},
         "enable_ipv6": {"type": "boolean"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "name": {"type": "string"}
       },
       "additionalProperties": false
@@ -381,7 +382,7 @@
           },
           "additionalProperties": false
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "name": {"type": "string"}
       },
       "additionalProperties": false
@@ -415,6 +416,21 @@
       ]
     },
 
+    "labels": {
+      "oneOf": [
+        {
+          "type": "object",
+          "patternProperties": {
+            ".+": {
+              "type": "string"
+            }
+          },
+          "additionalProperties": false
+        },
+        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+      ]
+    },
+
     "blkio_limit": {
       "type": "object",
       "properties": {

+ 22 - 5
compose/config/config_schema_v2.3.json

@@ -88,7 +88,7 @@
                 "context": {"type": "string"},
                 "dockerfile": {"type": "string"},
                 "args": {"$ref": "#/definitions/list_or_dict"},
-                "labels": {"$ref": "#/definitions/list_or_dict"},
+                "labels": {"$ref": "#/definitions/labels"},
                 "cache_from": {"$ref": "#/definitions/list_of_strings"},
                 "network": {"type": "string"},
                 "target": {"type": "string"},
@@ -192,7 +192,7 @@
         "init": {"type": ["boolean", "string"]},
         "ipc": {"type": "string"},
         "isolation": {"type": "string"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
 
         "logging": {
@@ -226,7 +226,8 @@
                         "aliases": {"$ref": "#/definitions/list_of_strings"},
                         "ipv4_address": {"type": "string"},
                         "ipv6_address": {"type": "string"},
-                        "link_local_ips": {"$ref": "#/definitions/list_of_strings"}
+                        "link_local_ips": {"$ref": "#/definitions/list_of_strings"},
+                        "priority": {"type": "number"}
                       },
                       "additionalProperties": false
                     },
@@ -261,6 +262,7 @@
         "privileged": {"type": "boolean"},
         "read_only": {"type": "boolean"},
         "restart": {"type": "string"},
+        "runtime": {"type": "string"},
         "scale": {"type": "integer"},
         "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
         "shm_size": {"type": ["number", "string"]},
@@ -393,7 +395,7 @@
         },
         "internal": {"type": "boolean"},
         "enable_ipv6": {"type": "boolean"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "name": {"type": "string"}
       },
       "additionalProperties": false
@@ -417,7 +419,7 @@
           },
           "additionalProperties": false
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "name": {"type": "string"}
       },
       "additionalProperties": false
@@ -451,6 +453,21 @@
       ]
     },
 
+    "labels": {
+      "oneOf": [
+        {
+          "type": "object",
+          "patternProperties": {
+            ".+": {
+              "type": "string"
+            }
+          },
+          "additionalProperties": false
+        },
+        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+      ]
+    },
+
     "blkio_limit": {
       "type": "object",
       "properties": {

+ 19 - 4
compose/config/config_schema_v3.0.json

@@ -105,7 +105,7 @@
         "hostname": {"type": "string"},
         "image": {"type": "string"},
         "ipc": {"type": "string"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
 
         "logging": {
@@ -223,7 +223,7 @@
       "properties": {
         "mode": {"type": "string"},
         "replicas": {"type": "integer"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "update_config": {
           "type": "object",
           "properties": {
@@ -310,7 +310,7 @@
           "additionalProperties": false
         },
         "internal": {"type": "boolean"},
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -333,7 +333,7 @@
           },
           "additionalProperties": false
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -366,6 +366,21 @@
       ]
     },
 
+    "labels": {
+      "oneOf": [
+        {
+          "type": "object",
+          "patternProperties": {
+            ".+": {
+              "type": "string"
+            }
+          },
+          "additionalProperties": false
+        },
+        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+      ]
+    },
+
     "constraints": {
       "service": {
         "id": "#/definitions/constraints/service",

+ 20 - 5
compose/config/config_schema_v3.1.json

@@ -116,7 +116,7 @@
         "hostname": {"type": "string"},
         "image": {"type": "string"},
         "ipc": {"type": "string"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
 
         "logging": {
@@ -252,7 +252,7 @@
       "properties": {
         "mode": {"type": "string"},
         "replicas": {"type": "integer"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "update_config": {
           "type": "object",
           "properties": {
@@ -339,7 +339,7 @@
           "additionalProperties": false
         },
         "internal": {"type": "boolean"},
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -362,7 +362,7 @@
           },
           "additionalProperties": false
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -378,7 +378,7 @@
             "name": {"type": "string"}
           }
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -411,6 +411,21 @@
       ]
     },
 
+    "labels": {
+      "oneOf": [
+        {
+          "type": "object",
+          "patternProperties": {
+            ".+": {
+              "type": "string"
+            }
+          },
+          "additionalProperties": false
+        },
+        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+      ]
+    },
+
     "constraints": {
       "service": {
         "id": "#/definitions/constraints/service",

+ 20 - 5
compose/config/config_schema_v3.2.json

@@ -118,7 +118,7 @@
         "hostname": {"type": "string"},
         "image": {"type": "string"},
         "ipc": {"type": "string"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
 
         "logging": {
@@ -299,7 +299,7 @@
         "mode": {"type": "string"},
         "endpoint_mode": {"type": "string"},
         "replicas": {"type": "integer"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "update_config": {
           "type": "object",
           "properties": {
@@ -387,7 +387,7 @@
         },
         "internal": {"type": "boolean"},
         "attachable": {"type": "boolean"},
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -410,7 +410,7 @@
           },
           "additionalProperties": false
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -426,7 +426,7 @@
             "name": {"type": "string"}
           }
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -459,6 +459,21 @@
       ]
     },
 
+    "labels": {
+      "oneOf": [
+        {
+          "type": "object",
+          "patternProperties": {
+            ".+": {
+              "type": "string"
+            }
+          },
+          "additionalProperties": false
+        },
+        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+      ]
+    },
+
     "constraints": {
       "service": {
         "id": "#/definitions/constraints/service",

+ 22 - 7
compose/config/config_schema_v3.3.json

@@ -83,7 +83,7 @@
                 "context": {"type": "string"},
                 "dockerfile": {"type": "string"},
                 "args": {"$ref": "#/definitions/list_or_dict"},
-                "labels": {"$ref": "#/definitions/list_or_dict"},
+                "labels": {"$ref": "#/definitions/labels"},
                 "cache_from": {"$ref": "#/definitions/list_of_strings"}
               },
               "additionalProperties": false
@@ -151,7 +151,7 @@
         "hostname": {"type": "string"},
         "image": {"type": "string"},
         "ipc": {"type": "string"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
 
         "logging": {
@@ -332,7 +332,7 @@
         "mode": {"type": "string"},
         "endpoint_mode": {"type": "string"},
         "replicas": {"type": "integer"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "update_config": {
           "type": "object",
           "properties": {
@@ -430,7 +430,7 @@
         },
         "internal": {"type": "boolean"},
         "attachable": {"type": "boolean"},
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -453,7 +453,7 @@
           },
           "additionalProperties": false
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -469,7 +469,7 @@
             "name": {"type": "string"}
           }
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -485,7 +485,7 @@
             "name": {"type": "string"}
           }
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -518,6 +518,21 @@
       ]
     },
 
+    "labels": {
+      "oneOf": [
+        {
+          "type": "object",
+          "patternProperties": {
+            ".+": {
+              "type": "string"
+            }
+          },
+          "additionalProperties": false
+        },
+        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+      ]
+    },
+
     "constraints": {
       "service": {
         "id": "#/definitions/constraints/service",

+ 22 - 7
compose/config/config_schema_v3.4.json

@@ -85,7 +85,7 @@
                 "context": {"type": "string"},
                 "dockerfile": {"type": "string"},
                 "args": {"$ref": "#/definitions/list_or_dict"},
-                "labels": {"$ref": "#/definitions/list_or_dict"},
+                "labels": {"$ref": "#/definitions/labels"},
                 "cache_from": {"$ref": "#/definitions/list_of_strings"},
                 "network": {"type": "string"},
                 "target": {"type": "string"}
@@ -155,7 +155,7 @@
         "hostname": {"type": "string"},
         "image": {"type": "string"},
         "ipc": {"type": "string"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
 
         "logging": {
@@ -337,7 +337,7 @@
         "mode": {"type": "string"},
         "endpoint_mode": {"type": "string"},
         "replicas": {"type": "integer"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "update_config": {
           "type": "object",
           "properties": {
@@ -438,7 +438,7 @@
         },
         "internal": {"type": "boolean"},
         "attachable": {"type": "boolean"},
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -462,7 +462,7 @@
           },
           "additionalProperties": false
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -478,7 +478,7 @@
             "name": {"type": "string"}
           }
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -494,7 +494,7 @@
             "name": {"type": "string"}
           }
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -527,6 +527,21 @@
       ]
     },
 
+    "labels": {
+      "oneOf": [
+        {
+          "type": "object",
+          "patternProperties": {
+            ".+": {
+              "type": "string"
+            }
+          },
+          "additionalProperties": false
+        },
+        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+      ]
+    },
+
     "constraints": {
       "service": {
         "id": "#/definitions/constraints/service",

+ 22 - 7
compose/config/config_schema_v3.5.json

@@ -84,7 +84,7 @@
                 "context": {"type": "string"},
                 "dockerfile": {"type": "string"},
                 "args": {"$ref": "#/definitions/list_or_dict"},
-                "labels": {"$ref": "#/definitions/list_or_dict"},
+                "labels": {"$ref": "#/definitions/labels"},
                 "cache_from": {"$ref": "#/definitions/list_of_strings"},
                 "network": {"type": "string"},
                 "target": {"type": "string"},
@@ -156,7 +156,7 @@
         "image": {"type": "string"},
         "ipc": {"type": "string"},
         "isolation": {"type": "string"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
 
         "logging": {
@@ -338,7 +338,7 @@
         "mode": {"type": "string"},
         "endpoint_mode": {"type": "string"},
         "replicas": {"type": "integer"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "labels": {"$ref": "#/definitions/labels"},
         "update_config": {
           "type": "object",
           "properties": {
@@ -464,7 +464,7 @@
         },
         "internal": {"type": "boolean"},
         "attachable": {"type": "boolean"},
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -488,7 +488,7 @@
           },
           "additionalProperties": false
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -505,7 +505,7 @@
             "name": {"type": "string"}
           }
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -522,7 +522,7 @@
             "name": {"type": "string"}
           }
         },
-        "labels": {"$ref": "#/definitions/list_or_dict"}
+        "labels": {"$ref": "#/definitions/labels"}
       },
       "additionalProperties": false
     },
@@ -555,6 +555,21 @@
       ]
     },
 
+    "labels": {
+      "oneOf": [
+        {
+          "type": "object",
+          "patternProperties": {
+            ".+": {
+              "type": "string"
+            }
+          },
+          "additionalProperties": false
+        },
+        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+      ]
+    },
+
     "constraints": {
       "service": {
         "id": "#/definitions/constraints/service",

+ 102 - 14
compose/config/interpolation.py

@@ -60,6 +60,15 @@ def interpolate_value(name, config_key, value, section, interpolator):
                 name=name,
                 section=section,
                 string=e.string))
+    except UnsetRequiredSubstitution as e:
+        raise ConfigurationError(
+            'Missing mandatory value for "{config_key}" option in {section} "{name}": {err}'.format(
+                config_key=config_key,
+                name=name,
+                section=section,
+                err=e.err
+            )
+        )
 
 
 def recursive_interpolate(obj, interpolator, config_path):
@@ -75,26 +84,61 @@ def recursive_interpolate(obj, interpolator, config_path):
         )
     if isinstance(obj, list):
         return [recursive_interpolate(val, interpolator, config_path) for val in obj]
-    return obj
+    return converter.convert(config_path, obj)
 
 
 class TemplateWithDefaults(Template):
-    idpattern = r'[_a-z][_a-z0-9]*(?::?-[^}]*)?'
+    pattern = r"""
+        %(delim)s(?:
+            (?P<escaped>%(delim)s) |
+            (?P<named>%(id)s)      |
+            {(?P<braced>%(bid)s)}  |
+            (?P<invalid>)
+        )
+        """ % {
+        'delim': re.escape('$'),
+        'id': r'[_a-z][_a-z0-9]*',
+        'bid': r'[_a-z][_a-z0-9]*(?:(?P<sep>:?[-?])[^}]*)?',
+    }
+
+    @staticmethod
+    def process_braced_group(braced, sep, mapping):
+        if ':-' == sep:
+            var, _, default = braced.partition(':-')
+            return mapping.get(var) or default
+        elif '-' == sep:
+            var, _, default = braced.partition('-')
+            return mapping.get(var, default)
+
+        elif ':?' == sep:
+            var, _, err = braced.partition(':?')
+            result = mapping.get(var)
+            if not result:
+                raise UnsetRequiredSubstitution(err)
+            return result
+        elif '?' == sep:
+            var, _, err = braced.partition('?')
+            if var in mapping:
+                return mapping.get(var)
+            raise UnsetRequiredSubstitution(err)
 
     # Modified from python2.7/string.py
     def substitute(self, mapping):
         # Helper function for .sub()
+
         def convert(mo):
-            # Check the most common path first.
             named = mo.group('named') or mo.group('braced')
+            braced = mo.group('braced')
+            if braced is not None:
+                sep = mo.group('sep')
+                result = self.process_braced_group(braced, sep, mapping)
+                if result is not None:
+                    return result
+
             if named is not None:
-                if ':-' in named:
-                    var, _, default = named.partition(':-')
-                    return mapping.get(var) or default
-                if '-' in named:
-                    var, _, default = named.partition('-')
-                    return mapping.get(var, default)
                 val = mapping[named]
+                if isinstance(val, six.binary_type):
+                    val = val.decode('utf-8')
                 return '%s' % (val,)
             if mo.group('escaped') is not None:
                 return self.delimiter
@@ -110,11 +154,17 @@ class InvalidInterpolation(Exception):
         self.string = string
 
 
+class UnsetRequiredSubstitution(Exception):
+    def __init__(self, custom_err_msg):
+        self.err = custom_err_msg
+
+
 PATH_JOKER = '[^.]+'
+FULL_JOKER = '.+'
 
 
 def re_path(*args):
-    return re.compile('^{}$'.format('.'.join(args)))
+    return re.compile('^{}$'.format(r'\.'.join(args)))
 
 
 def re_path_basic(section, name):
@@ -126,6 +176,8 @@ def service_path(*args):
 
 
 def to_boolean(s):
+    if not isinstance(s, six.string_types):
+        return s
     s = s.lower()
     if s in ['y', 'yes', 'true', 'on']:
         return True
@@ -135,27 +187,52 @@ def to_boolean(s):
 
 
 def to_int(s):
+    if not isinstance(s, six.string_types):
+        return s
+
     # We must be able to handle octal representation for `mode` values notably
     if six.PY3 and re.match('^0[0-9]+$', s.strip()):
         s = '0o' + s[1:]
-    return int(s, base=0)
+    try:
+        return int(s, base=0)
+    except ValueError:
+        raise ValueError('"{}" is not a valid integer'.format(s))
+
+
+def to_float(s):
+    if not isinstance(s, six.string_types):
+        return s
+
+    try:
+        return float(s)
+    except ValueError:
+        raise ValueError('"{}" is not a valid float'.format(s))
+
+
+def to_str(o):
+    if isinstance(o, (bool, float, int)):
+        return '{}'.format(o)
+    return o
 
 
 class ConversionMap(object):
     map = {
         service_path('blkio_config', 'weight'): to_int,
         service_path('blkio_config', 'weight_device', 'weight'): to_int,
-        service_path('cpus'): float,
+        service_path('build', 'labels', FULL_JOKER): to_str,
+        service_path('cpus'): to_float,
         service_path('cpu_count'): to_int,
         service_path('configs', 'mode'): to_int,
         service_path('secrets', 'mode'): to_int,
         service_path('healthcheck', 'retries'): to_int,
         service_path('healthcheck', 'disable'): to_boolean,
+        service_path('deploy', 'labels', PATH_JOKER): to_str,
         service_path('deploy', 'replicas'): to_int,
         service_path('deploy', 'update_config', 'parallelism'): to_int,
-        service_path('deploy', 'update_config', 'max_failure_ratio'): float,
+        service_path('deploy', 'update_config', 'max_failure_ratio'): to_float,
         service_path('deploy', 'restart_policy', 'max_attempts'): to_int,
         service_path('mem_swappiness'): to_int,
+        service_path('labels', FULL_JOKER): to_str,
         service_path('oom_kill_disable'): to_boolean,
         service_path('oom_score_adj'): to_int,
         service_path('ports', 'target'): to_int,
@@ -173,15 +250,26 @@ class ConversionMap(object):
         re_path_basic('network', 'attachable'): to_boolean,
         re_path_basic('network', 'external'): to_boolean,
         re_path_basic('network', 'internal'): to_boolean,
+        re_path('network', PATH_JOKER, 'labels', FULL_JOKER): to_str,
         re_path_basic('volume', 'external'): to_boolean,
+        re_path('volume', PATH_JOKER, 'labels', FULL_JOKER): to_str,
         re_path_basic('secret', 'external'): to_boolean,
+        re_path('secret', PATH_JOKER, 'labels', FULL_JOKER): to_str,
         re_path_basic('config', 'external'): to_boolean,
+        re_path('config', PATH_JOKER, 'labels', FULL_JOKER): to_str,
     }
 
     def convert(self, path, value):
         for rexp in self.map.keys():
             if rexp.match(path):
-                return self.map[rexp](value)
+                try:
+                    return self.map[rexp](value)
+                except ValueError as e:
+                    raise ConfigurationError(
+                        'Error while attempting to convert {} to appropriate type: {}'.format(
+                            path, e
+                        )
+                    )
         return value
 
 

+ 5 - 1
compose/config/serialize.py

@@ -27,6 +27,9 @@ def serialize_string(dumper, data):
     """ Ensure boolean-like strings are quoted in the output and escape $ characters """
     representer = dumper.represent_str if six.PY3 else dumper.represent_unicode
 
+    if isinstance(data, six.binary_type):
+        data = data.decode('utf-8')
+
     data = data.replace('$', '$$')
 
     if data.lower() in ('y', 'n', 'yes', 'no', 'on', 'off', 'true', 'false'):
@@ -90,7 +93,8 @@ def serialize_config(config, image_digests=None):
         denormalize_config(config, image_digests),
         default_flow_style=False,
         indent=2,
-        width=80
+        width=80,
+        allow_unicode=True
     )
 
 

+ 45 - 7
compose/config/types.py

@@ -4,6 +4,7 @@ Types for objects parsed from the configuration.
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
+import ntpath
 import os
 import re
 from collections import namedtuple
@@ -145,9 +146,10 @@ class MountSpec(object):
     _fields = ['type', 'source', 'target', 'read_only', 'consistency']
 
     @classmethod
-    def parse(cls, mount_dict, normalize=False):
+    def parse(cls, mount_dict, normalize=False, win_host=False):
+        normpath = ntpath.normpath if win_host else os.path.normpath
         if mount_dict.get('source'):
-            mount_dict['source'] = os.path.normpath(mount_dict['source'])
+            mount_dict['source'] = normpath(mount_dict['source'])
             if normalize:
                 mount_dict['source'] = normalize_path_for_engine(mount_dict['source'])
 
@@ -183,12 +185,17 @@ class MountSpec(object):
     def is_named_volume(self):
         return self.type == 'volume' and self.source
 
+    @property
+    def is_tmpfs(self):
+        return self.type == 'tmpfs'
+
     @property
     def external(self):
         return self.source
 
 
 class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
+    win32 = False
 
     @classmethod
     def _parse_unix(cls, volume_config):
@@ -232,7 +239,7 @@ class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
         else:
             external = parts[0]
             parts = separate_next_section(parts[1])
-            external = os.path.normpath(external)
+            external = ntpath.normpath(external)
             internal = parts[0]
             if len(parts) > 1:
                 if ':' in parts[1]:
@@ -245,14 +252,16 @@ class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
         if normalize:
             external = normalize_path_for_engine(external) if external else None
 
-        return cls(external, internal, mode)
+        result = cls(external, internal, mode)
+        result.win32 = True
+        return result
 
     @classmethod
-    def parse(cls, volume_config, normalize=False):
+    def parse(cls, volume_config, normalize=False, win_host=False):
         """Parse a volume_config path and split it into external:internal[:mode]
         parts to be returned as a valid VolumeSpec.
         """
-        if IS_WINDOWS_PLATFORM:
+        if IS_WINDOWS_PLATFORM or win_host:
             return cls._parse_win32(volume_config, normalize)
         else:
             return cls._parse_unix(volume_config)
@@ -265,7 +274,7 @@ class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
     @property
     def is_named_volume(self):
         res = self.external and not self.external.startswith(('.', '/', '~'))
-        if not IS_WINDOWS_PLATFORM:
+        if not self.win32:
             return res
 
         return (
@@ -404,6 +413,35 @@ class ServicePort(namedtuple('_ServicePort', 'target published protocol mode ext
         return normalize_port_dict(self.repr())
 
 
+class GenericResource(namedtuple('_GenericResource', 'kind value')):
+    @classmethod
+    def parse(cls, dct):
+        if 'discrete_resource_spec' not in dct:
+            raise ConfigurationError(
+                'generic_resource entry must include a discrete_resource_spec key'
+            )
+        if 'kind' not in dct['discrete_resource_spec']:
+            raise ConfigurationError(
+                'generic_resource entry must include a discrete_resource_spec.kind subkey'
+            )
+        return cls(
+            dct['discrete_resource_spec']['kind'],
+            dct['discrete_resource_spec'].get('value')
+        )
+
+    def repr(self):
+        return {
+            'discrete_resource_spec': {
+                'kind': self.kind,
+                'value': self.value,
+            }
+        }
+
+    @property
+    def merge_field(self):
+        return self.kind
+
+
 def normalize_port_dict(port):
     return '{external_ip}{has_ext_ip}{published}{is_pub}{target}/{protocol}'.format(
         published=port.get('published', ''),

+ 1 - 0
compose/const.py

@@ -18,6 +18,7 @@ LABEL_VERSION = 'com.docker.compose.version'
 LABEL_VOLUME = 'com.docker.compose.volume'
 LABEL_CONFIG_HASH = 'com.docker.compose.config-hash'
 NANOCPUS_SCALE = 1000000000
+PARALLEL_LIMIT = 64
 
 SECRETS_PATH = '/run/secrets'
 

+ 25 - 7
compose/container.py

@@ -4,6 +4,7 @@ from __future__ import unicode_literals
 from functools import reduce
 
 import six
+from docker.errors import ImageNotFound
 
 from .const import LABEL_CONTAINER_NUMBER
 from .const import LABEL_PROJECT
@@ -66,15 +67,17 @@ class Container(object):
     def name(self):
         return self.dictionary['Name'][1:]
 
+    @property
+    def project(self):
+        return self.labels.get(LABEL_PROJECT)
+
     @property
     def service(self):
         return self.labels.get(LABEL_SERVICE)
 
     @property
     def name_without_project(self):
-        project = self.labels.get(LABEL_PROJECT)
-
-        if self.name.startswith('{0}_{1}'.format(project, self.service)):
+        if self.name.startswith('{0}_{1}'.format(self.project, self.service)):
             return '{0}_{1}'.format(self.service, self.number)
         else:
             return self.name
@@ -230,10 +233,10 @@ class Container(object):
         """Rename the container to a hopefully unique temporary container name
         by prepending the short id.
         """
-        self.client.rename(
-            self.id,
-            '%s_%s' % (self.short_id, self.name)
-        )
+        if not self.name.startswith(self.short_id):
+            self.client.rename(
+                self.id, '{0}_{1}'.format(self.short_id, self.name)
+            )
 
     def inspect_if_not_inspected(self):
         if not self.has_been_inspected:
@@ -250,6 +253,21 @@ class Container(object):
         self.has_been_inspected = True
         return self.dictionary
 
+    def image_exists(self):
+        try:
+            self.client.inspect_image(self.image)
+        except ImageNotFound:
+            return False
+
+        return True
+
+    def reset_image(self, img_id):
+        """ If this container's image has been removed, temporarily replace the old image ID
+            with `img_id`.
+        """
+        if not self.image_exists():
+            self.dictionary['Image'] = img_id
+
     def attach(self, *args, **kwargs):
         return self.client.attach(self.id, *args, **kwargs)
 

+ 5 - 1
compose/network.py

@@ -2,6 +2,7 @@ from __future__ import absolute_import
 from __future__ import unicode_literals
 
 import logging
+from collections import OrderedDict
 
 from docker.errors import NotFound
 from docker.types import IPAMConfig
@@ -286,4 +287,7 @@ def get_networks(service_dict, network_definitions):
                 'Service "{}" uses an undefined network "{}"'
                 .format(service_dict['name'], name))
 
-    return networks
+    return OrderedDict(sorted(
+        networks.items(),
+        key=lambda t: t[1].get('priority') or 0, reverse=True
+    ))

+ 24 - 5
compose/parallel.py

@@ -8,6 +8,7 @@ from threading import Semaphore
 from threading import Thread
 
 from docker.errors import APIError
+from docker.errors import ImageNotFound
 from six.moves import _thread as thread
 from six.moves.queue import Empty
 from six.moves.queue import Queue
@@ -15,6 +16,7 @@ from six.moves.queue import Queue
 from compose.cli.colors import green
 from compose.cli.colors import red
 from compose.cli.signals import ShutdownException
+from compose.const import PARALLEL_LIMIT
 from compose.errors import HealthCheckFailed
 from compose.errors import NoHealthCheckConfigured
 from compose.errors import OperationFailedError
@@ -26,6 +28,20 @@ log = logging.getLogger(__name__)
 STOP = object()
 
 
+class GlobalLimit(object):
+    """Simple class to hold a global semaphore limiter for a project. This class
+    should be treated as a singleton that is instantiated when the project is.
+    """
+
+    global_limiter = Semaphore(PARALLEL_LIMIT)
+
+    @classmethod
+    def set_global_limit(cls, value):
+        if value is None:
+            value = PARALLEL_LIMIT
+        cls.global_limiter = Semaphore(value)
+
+
 def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, parent_objects=None):
     """Runs func on objects in parallel while ensuring that func is
     ran on object only after it is ran on all its dependencies.
@@ -38,10 +54,7 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, pa
 
     writer = ParallelStreamWriter(stream, msg)
 
-    if parent_objects:
-        display_objects = list(parent_objects)
-    else:
-        display_objects = objects
+    display_objects = list(parent_objects) if parent_objects else objects
 
     for obj in display_objects:
         writer.add_object(get_name(obj))
@@ -61,6 +74,12 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, pa
         if exception is None:
             writer.write(get_name(obj), 'done', green)
             results.append(result)
+        elif isinstance(exception, ImageNotFound):
+            # This is to bubble up ImageNotFound exceptions to the client so we
+            # can prompt the user if they want to rebuild.
+            errors[get_name(obj)] = exception.explanation
+            writer.write(get_name(obj), 'error', red)
+            error_to_reraise = exception
         elif isinstance(exception, APIError):
             errors[get_name(obj)] = exception.explanation
             writer.write(get_name(obj), 'error', red)
@@ -173,7 +192,7 @@ def producer(obj, func, results, limiter):
     The entry point for a producer thread which runs func on a single object.
     Places a tuple on the results queue once func has either returned or raised.
     """
-    with limiter:
+    with limiter, GlobalLimit.global_limiter:
         try:
             result = func(obj)
             results.put((obj, result, None))

+ 32 - 26
compose/project.py

@@ -7,6 +7,7 @@ import operator
 from functools import reduce
 
 import enum
+import six
 from docker.errors import APIError
 
 from . import parallel
@@ -330,9 +331,16 @@ class Project(object):
             service_names, stopped=True, one_off=one_off
         ), options)
 
-    def down(self, remove_image_type, include_volumes, remove_orphans=False, timeout=None):
+    def down(
+            self,
+            remove_image_type,
+            include_volumes,
+            remove_orphans=False,
+            timeout=None,
+            ignore_orphans=False):
         self.stop(one_off=OneOffFilter.include, timeout=timeout)
-        self.find_orphan_containers(remove_orphans)
+        if not ignore_orphans:
+            self.find_orphan_containers(remove_orphans)
         self.remove_stopped(v=include_volumes, one_off=OneOffFilter.include)
 
         self.networks.remove()
@@ -432,14 +440,17 @@ class Project(object):
            timeout=None,
            detached=False,
            remove_orphans=False,
+           ignore_orphans=False,
            scale_override=None,
            rescale=True,
-           start=True):
-
-        warn_for_swarm_mode(self.client)
+           start=True,
+           always_recreate_deps=False,
+           reset_container_image=False,
+           renew_anonymous_volumes=False):
 
         self.initialize()
-        self.find_orphan_containers(remove_orphans)
+        if not ignore_orphans:
+            self.find_orphan_containers(remove_orphans)
 
         if scale_override is None:
             scale_override = {}
@@ -450,7 +461,8 @@ class Project(object):
 
         for svc in services:
             svc.ensure_image_exists(do_build=do_build)
-        plans = self._get_convergence_plans(services, strategy)
+        plans = self._get_convergence_plans(
+            services, strategy, always_recreate_deps=always_recreate_deps)
         scaled_services = self.get_scaled_services(services, scale_override)
 
         def do(service):
@@ -462,7 +474,9 @@ class Project(object):
                 scale_override=scale_override.get(service.name),
                 rescale=rescale,
                 start=start,
-                project_services=scaled_services
+                project_services=scaled_services,
+                reset_container_image=reset_container_image,
+                renew_anonymous_volumes=renew_anonymous_volumes,
             )
 
         def get_deps(service):
@@ -494,7 +508,7 @@ class Project(object):
         self.networks.initialize()
         self.volumes.initialize()
 
-    def _get_convergence_plans(self, services, strategy):
+    def _get_convergence_plans(self, services, strategy, always_recreate_deps=False):
         plans = {}
 
         for service in services:
@@ -509,7 +523,13 @@ class Project(object):
                 log.debug('%s has upstream changes (%s)',
                           service.name,
                           ", ".join(updated_dependencies))
-                plan = service.convergence_plan(ConvergenceStrategy.always)
+                containers_stopped = any(
+                    service.containers(stopped=True, filters={'status': ['created', 'exited']}))
+                has_links = any(c.get('HostConfig.Links') for c in service.containers())
+                if always_recreate_deps or containers_stopped or not has_links:
+                    plan = service.convergence_plan(ConvergenceStrategy.always)
+                else:
+                    plan = service.convergence_plan(strategy)
             else:
                 plan = service.convergence_plan(strategy)
 
@@ -668,24 +688,10 @@ def get_secrets(service, service_secrets, secret_defs):
     return secrets
 
 
-def warn_for_swarm_mode(client):
-    info = client.info()
-    if info.get('Swarm', {}).get('LocalNodeState') == 'active':
-        if info.get('ServerVersion', '').startswith('ucp'):
-            # UCP does multi-node scheduling with traditional Compose files.
-            return
-
-        log.warn(
-            "The Docker Engine you're using is running in swarm mode.\n\n"
-            "Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
-            "All containers will be scheduled on the current node.\n\n"
-            "To deploy your application across the swarm, "
-            "use `docker stack deploy`.\n"
-        )
-
-
 class NoSuchService(Exception):
     def __init__(self, name):
+        if isinstance(name, six.binary_type):
+            name = name.decode('utf-8')
         self.name = name
         self.msg = "No such service: %s" % self.name
 

+ 41 - 19
compose/service.py

@@ -6,6 +6,7 @@ import os
 import re
 import sys
 from collections import namedtuple
+from collections import OrderedDict
 from operator import attrgetter
 
 import enum
@@ -87,6 +88,7 @@ HOST_CONFIG_KEYS = [
     'pids_limit',
     'privileged',
     'restart',
+    'runtime',
     'security_opt',
     'shm_size',
     'storage_opt',
@@ -407,7 +409,8 @@ class Service(object):
 
             return containers
 
-    def _execute_convergence_recreate(self, containers, scale, timeout, detached, start):
+    def _execute_convergence_recreate(self, containers, scale, timeout, detached, start,
+                                      renew_anonymous_volumes):
             if scale is not None and len(containers) > scale:
                 self._downscale(containers[scale:], timeout)
                 containers = containers[:scale]
@@ -415,7 +418,7 @@ class Service(object):
             def recreate(container):
                 return self.recreate_container(
                     container, timeout=timeout, attach_logs=not detached,
-                    start_new_container=start
+                    start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
                 )
             containers, errors = parallel_execute(
                 containers,
@@ -466,7 +469,9 @@ class Service(object):
         )
 
     def execute_convergence_plan(self, plan, timeout=None, detached=False,
-                                 start=True, scale_override=None, rescale=True, project_services=None):
+                                 start=True, scale_override=None,
+                                 rescale=True, project_services=None,
+                                 reset_container_image=False, renew_anonymous_volumes=False):
         (action, containers) = plan
         scale = scale_override if scale_override is not None else self.scale_num
         containers = sorted(containers, key=attrgetter('number'))
@@ -484,8 +489,15 @@ class Service(object):
             scale = None
 
         if action == 'recreate':
+            if reset_container_image:
+                # Updating the image ID on the container object lets us recover old volumes if
+                # the new image uses them as well
+                img_id = self.image()['Id']
+                for c in containers:
+                    c.reset_image(img_id)
             return self._execute_convergence_recreate(
-                containers, scale, timeout, detached, start
+                containers, scale, timeout, detached, start,
+                renew_anonymous_volumes,
             )
 
         if action == 'start':
@@ -505,12 +517,8 @@ class Service(object):
 
         raise Exception("Invalid action: {}".format(action))
 
-    def recreate_container(
-            self,
-            container,
-            timeout=None,
-            attach_logs=False,
-            start_new_container=True):
+    def recreate_container(self, container, timeout=None, attach_logs=False, start_new_container=True,
+                           renew_anonymous_volumes=False):
         """Recreate a container.
 
         The original container is renamed to a temporary name so that data
@@ -521,7 +529,7 @@ class Service(object):
         container.stop(timeout=self.stop_timeout(timeout))
         container.rename_to_tmp_name()
         new_container = self.create_container(
-            previous_container=container,
+            previous_container=container if not renew_anonymous_volumes else None,
             number=container.labels.get(LABEL_CONTAINER_NUMBER),
             quiet=True,
         )
@@ -556,18 +564,25 @@ class Service(object):
             raise OperationFailedError("Cannot start service %s: %s" % (self.name, ex.explanation))
         return container
 
+    @property
+    def prioritized_networks(self):
+        return OrderedDict(
+            sorted(
+                self.networks.items(),
+                key=lambda t: t[1].get('priority') or 0, reverse=True
+            )
+        )
+
     def connect_container_to_networks(self, container):
         connected_networks = container.get('NetworkSettings.Networks')
 
-        for network, netdefs in self.networks.items():
+        for network, netdefs in self.prioritized_networks.items():
             if network in connected_networks:
                 if short_id_alias_exists(container, network):
                     continue
+                self.client.disconnect_container_from_network(container.id, network)
 
-                self.client.disconnect_container_from_network(
-                    container.id,
-                    network)
-
+            log.debug('Connecting to {}'.format(network))
             self.client.connect_container_to_network(
                 container.id, network,
                 aliases=self._get_aliases(netdefs, container),
@@ -833,8 +848,14 @@ class Service(object):
         if version_gte(self.client.api_version, '1.30'):
             override_options['mounts'] = [build_mount(v) for v in container_mounts] or None
         else:
-            override_options['binds'].extend(m.legacy_repr() for m in container_mounts)
-            container_options['volumes'].update((m.target, {}) for m in container_mounts)
+            # Workaround for 3.2 format
+            self.options['tmpfs'] = self.options.get('tmpfs') or []
+            for m in container_mounts:
+                if m.is_tmpfs:
+                    self.options['tmpfs'].append(m.target)
+                else:
+                    override_options['binds'].append(m.legacy_repr())
+                    container_options['volumes'][m.target] = {}
 
         secret_volumes = self.get_secret_volumes()
         if secret_volumes:
@@ -878,6 +899,7 @@ class Service(object):
             dns_opt=options.get('dns_opt'),
             dns_search=options.get('dns_search'),
             restart_policy=options.get('restart'),
+            runtime=options.get('runtime'),
             cap_add=options.get('cap_add'),
             cap_drop=options.get('cap_drop'),
             mem_limit=options.get('mem_limit'),
@@ -1343,7 +1365,7 @@ def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_o
             continue
 
         ctnr_mount = container_mounts.get(mount.target)
-        if not ctnr_mount.get('Name'):
+        if not ctnr_mount or not ctnr_mount.get('Name'):
             continue
 
         mount.source = ctnr_mount['Name']

+ 58 - 26
contrib/completion/bash/docker-compose

@@ -48,6 +48,31 @@ __docker_compose_has_option() {
 	return 1
 }
 
+# Returns `key` if we are currently completing the value of a map option (`key=value`)
+# which matches the extglob passed in as an argument.
+# This function is needed for key-specific completions.
+__docker_compose_map_key_of_current_option() {
+        local glob="$1"
+
+        local key glob_pos
+        if [ "$cur" = "=" ] ; then        # key= case
+                key="$prev"
+                glob_pos=$((cword - 2))
+        elif [[ $cur == *=* ]] ; then     # key=value case (OSX)
+                key=${cur%=*}
+                glob_pos=$((cword - 1))
+        elif [ "$prev" = "=" ] ; then
+                key=${words[$cword - 2]}  # key=value case
+                glob_pos=$((cword - 3))
+        else
+                return
+        fi
+
+        [ "${words[$glob_pos]}" = "=" ] && ((glob_pos--))  # --option=key=value syntax
+
+        [[ ${words[$glob_pos]} == @($glob) ]] && echo "$key"
+}
+
 # suppress trailing whitespace
 __docker_compose_nospace() {
 	# compopt is not available in ancient bash versions
@@ -64,48 +89,32 @@ __docker_compose_services_all() {
 	COMPREPLY=( $(compgen -W "$(___docker_compose_all_services_in_compose_file)" -- "$cur") )
 }
 
-# All services that have an entry with the given key in their compose_file section
-___docker_compose_services_with_key() {
-	# flatten sections under "services" to one line, then filter lines containing the key and return section name
-	__docker_compose_q config \
-		| sed -n -e '/^services:/,/^[^ ]/p' \
-		| sed -n 's/^  //p' \
-		| awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' \
-		| awk -F: -v key=": +$1:" '$0 ~ key {print $1}'
-}
-
 # All services that are defined by a Dockerfile reference
 __docker_compose_services_from_build() {
-	COMPREPLY=( $(compgen -W "$(___docker_compose_services_with_key build)" -- "$cur") )
+	COMPREPLY=( $(compgen -W "$(__docker_compose_q ps --services --filter "source=build")" -- "$cur") )
 }
 
 # All services that are defined by an image
 __docker_compose_services_from_image() {
-	COMPREPLY=( $(compgen -W "$(___docker_compose_services_with_key image)" -- "$cur") )
-}
-
-# The services for which containers have been created, optionally filtered
-# by a boolean expression passed in as argument.
-__docker_compose_services_with() {
-	local containers names
-	containers="$(__docker_compose_q ps -q)"
-	names=$(docker 2>/dev/null inspect -f "{{if ${1:-true}}}{{range \$k, \$v := .Config.Labels}}{{if eq \$k \"com.docker.compose.service\"}}{{\$v}}{{end}}{{end}}{{end}}" $containers)
-	COMPREPLY=( $(compgen -W "$names" -- "$cur") )
+	COMPREPLY=( $(compgen -W "$(__docker_compose_q ps --services --filter "source=image")" -- "$cur") )
 }
 
 # The services for which at least one paused container exists
 __docker_compose_services_paused() {
-	__docker_compose_services_with '.State.Paused'
+	names=$(__docker_compose_q ps --services --filter "status=paused")
+	COMPREPLY=( $(compgen -W "$names" -- "$cur") )
 }
 
 # The services for which at least one running container exists
 __docker_compose_services_running() {
-	__docker_compose_services_with '.State.Running'
+	names=$(__docker_compose_q ps --services --filter "status=running")
+	COMPREPLY=( $(compgen -W "$names" -- "$cur") )
 }
 
 # The services for which at least one stopped container exists
 __docker_compose_services_stopped() {
-	__docker_compose_services_with 'not .State.Running'
+	names=$(__docker_compose_q ps --services --filter "status=stopped")
+	COMPREPLY=( $(compgen -W "$names" -- "$cur") )
 }
 
 
@@ -194,11 +203,14 @@ _docker_compose_down() {
 			COMPREPLY=( $( compgen -W "all local" -- "$cur" ) )
 			return
 			;;
+		--timeout|-t)
+			return
+			;;
 	esac
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--help --rmi --volumes -v --remove-orphans" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--help --rmi --timeout -t --volumes -v --remove-orphans" -- "$cur" ) )
 			;;
 	esac
 }
@@ -327,9 +339,29 @@ _docker_compose_port() {
 
 
 _docker_compose_ps() {
+	local key=$(__docker_compose_map_key_of_current_option '--filter')
+	case "$key" in
+		source)
+			COMPREPLY=( $( compgen -W "build image" -- "${cur##*=}" ) )
+			return
+			;;
+		status)
+			COMPREPLY=( $( compgen -W "paused restarting running stopped" -- "${cur##*=}" ) )
+			return
+			;;
+	esac
+
+	case "$prev" in
+		--filter)
+			COMPREPLY=( $( compgen -W "source status" -S "=" -- "$cur" ) )
+			__docker_compose_nospace
+			return;
+			;;
+	esac
+
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--help -q" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--help -q --services --filter" -- "$cur" ) )
 			;;
 		*)
 			__docker_compose_services_all

+ 1 - 1
requirements.txt

@@ -2,7 +2,7 @@ backports.ssl-match-hostname==3.5.0.1; python_version < '3'
 cached-property==1.3.0
 certifi==2017.4.17
 chardet==3.0.4
-docker==2.6.1
+docker==2.7.0
 docker-pycreds==0.2.1
 dockerpty==0.4.1
 docopt==0.6.2

+ 27 - 0
script/circle/bintray-deploy.sh

@@ -0,0 +1,27 @@
+#!/bin/bash
+
+curl -f -u$BINTRAY_USERNAME:$BINTRAY_API_KEY -X GET \
+  https://api.bintray.com/repos/docker-compose/${CIRCLE_BRANCH}
+
+if test $? -ne 0; then
+  echo "Bintray repository ${CIRCLE_BRANCH} does not exist ; abandoning upload attempt"
+  exit 0
+fi
+
+curl -u$BINTRAY_USERNAME:$BINTRAY_API_KEY -X POST \
+  -d "{\
+    \"name\": \"${PKG_NAME}\", \"desc\": \"auto\", \"licenses\": [\"Apache-2.0\"], \
+    \"vcs_url\": \"${CIRCLE_REPOSITORY_URL}\" \
+  }" -H "Content-Type: application/json" \
+  https://api.bintray.com/packages/docker-compose/${CIRCLE_BRANCH}
+
+curl -u$BINTRAY_USERNAME:$BINTRAY_API_KEY -X POST -d "{\
+    \"name\": \"$CIRCLE_BRANCH\", \
+    \"desc\": \"Automated build of the ${CIRCLE_BRANCH} branch.\", \
+  }" -H "Content-Type: application/json" \
+  https://api.bintray.com/packages/docker-compose/${CIRCLE_BRANCH}/${PKG_NAME}/versions
+
+curl -f -T dist/docker-compose-${OS_NAME}-x86_64 -u$BINTRAY_USERNAME:$BINTRAY_API_KEY \
+  -H "X-Bintray-Package: ${PKG_NAME}" -H "X-Bintray-Version: $CIRCLE_BRANCH" \
+  -H "X-Bintray-Override: 1" -H "X-Bintray-Publish: 1" -X PUT \
+  https://api.bintray.com/content/docker-compose/${CIRCLE_BRANCH}/docker-compose-${OS_NAME}-x86_64 || exit 1

+ 2 - 0
script/release/download-binaries

@@ -33,5 +33,7 @@ wget -O $DESTINATION/docker-compose-Windows-x86_64.exe $APPVEYOR_URL
 
 echo -e "\n\nCopy the following lines into the integrity check table in the release notes:\n\n"
 cd $DESTINATION
+rm -rf *.sha256
 ls | xargs sha256sum | sed 's/  / | /g' | sed -r 's/([^ |]+)/`\1`/g'
+ls | xargs -I@ bash -c "sha256sum @ | cut -d' ' -f1 > @.sha256"
 cd -

+ 1 - 1
script/run/run.sh

@@ -15,7 +15,7 @@
 
 set -e
 
-VERSION="1.18.0"
+VERSION="1.19.0-rc1"
 IMAGE="docker/compose:$VERSION"
 
 

+ 0 - 34
script/setup/osx

@@ -10,40 +10,6 @@ openssl_version() {
   python -c "import ssl; print ssl.OPENSSL_VERSION"
 }
 
-desired_python_version="2.7.12"
-desired_python_brew_version="2.7.12"
-python_formula="https://raw.githubusercontent.com/Homebrew/homebrew-core/737a2e34a89b213c1f0a2a24fc1a3c06635eed04/Formula/python.rb"
-
-desired_openssl_version="1.0.2j"
-desired_openssl_brew_version="1.0.2j"
-openssl_formula="https://raw.githubusercontent.com/Homebrew/homebrew-core/30d3766453347f6e22b3ed6c74bb926d6def2eb5/Formula/openssl.rb"
-
-PATH="/usr/local/bin:$PATH"
-
-if !(which brew); then
-  ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
-fi
-
-brew update > /dev/null
-
-if !(python_version | grep "$desired_python_version"); then
-  if brew list | grep python; then
-    brew unlink python
-  fi
-
-  brew install "$python_formula"
-  brew switch python "$desired_python_brew_version"
-fi
-
-if !(openssl_version | grep "$desired_openssl_version"); then
-  if brew list | grep openssl; then
-    brew unlink openssl
-  fi
-
-  brew install "$openssl_formula"
-  brew switch openssl "$desired_openssl_brew_version"
-fi
-
 echo "*** Using $(python_version)"
 echo "*** Using $(openssl_version)"
 

+ 0 - 29
script/travis/bintray.json.tmpl

@@ -1,29 +0,0 @@
-{
-    "package": {
-        "name": "${TRAVIS_OS_NAME}",
-        "repo": "${TRAVIS_BRANCH}",
-        "subject": "docker-compose",
-        "desc": "Automated build of master branch from travis ci.",
-        "website_url": "https://github.com/docker/compose",
-        "issue_tracker_url": "https://github.com/docker/compose/issues",
-        "vcs_url": "https://github.com/docker/compose.git",
-        "licenses": ["Apache-2.0"]
-    },
-
-    "version": {
-        "name": "${TRAVIS_BRANCH}",
-        "desc": "Automated build of the ${TRAVIS_BRANCH} branch.",
-        "released": "${DATE}",
-        "vcs_tag": "master"
-    },
-
-    "files": [
-        {
-            "includePattern": "dist/(.*)",
-            "excludePattern": ".*\.tar.gz",
-            "uploadPattern": "$1",
-            "matrixParams": { "override": 1 }
-        }
-    ],
-    "publish": true
-}

+ 0 - 13
script/travis/build-binary

@@ -1,13 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
-    script/build/linux
-    # TODO: requires auth to push, so disable for now
-    # script/build/image master
-    # docker push docker/compose:master
-else
-    script/setup/osx
-    script/build/osx
-fi

+ 0 - 10
script/travis/ci

@@ -1,10 +0,0 @@
-#!/bin/bash
-
-set -e
-
-if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
-    tox -e py27,py34 -- tests/unit
-else
-    # TODO: we could also install py34 and test against it
-    tox -e py27 -- tests/unit
-fi

+ 0 - 10
script/travis/install

@@ -1,10 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
-    pip install tox==2.1.1
-else
-    sudo pip install --upgrade pip tox==2.1.1 virtualenv
-    pip --version
-fi

+ 0 - 13
script/travis/render-bintray-config.py

@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-from __future__ import absolute_import
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import datetime
-import os.path
-import sys
-
-os.environ['DATE'] = str(datetime.date.today())
-
-for line in sys.stdin:
-    print(os.path.expandvars(line), end='')

+ 1 - 1
setup.py

@@ -36,7 +36,7 @@ install_requires = [
     'requests >= 2.6.1, != 2.11.0, != 2.12.2, != 2.18.0, < 2.19',
     'texttable >= 0.9.0, < 0.10',
     'websocket-client >= 0.32.0, < 1.0',
-    'docker >= 2.6.1, < 3.0',
+    'docker >= 2.7.0, < 3.0',
     'dockerpty >= 0.4.1, < 0.5',
     'six >= 1.3.0, < 2',
     'jsonschema >= 2.5.1, < 3',

+ 219 - 197
tests/acceptance/cli_test.py

@@ -474,9 +474,9 @@ class CLITestCase(DockerClientTestCase):
         self.dispatch(['up', '-d'])
         result = self.dispatch(['ps'])
 
-        self.assertIn('multiplecomposefiles_simple_1', result.stdout)
-        self.assertIn('multiplecomposefiles_another_1', result.stdout)
-        self.assertNotIn('multiplecomposefiles_yetanother_1', result.stdout)
+        assert 'multiplecomposefiles_simple_1' in result.stdout
+        assert 'multiplecomposefiles_another_1' in result.stdout
+        assert 'multiplecomposefiles_yetanother_1' not in result.stdout
 
     def test_ps_alternate_composefile(self):
         config_path = os.path.abspath(
@@ -487,9 +487,37 @@ class CLITestCase(DockerClientTestCase):
         self.dispatch(['-f', 'compose2.yml', 'up', '-d'])
         result = self.dispatch(['-f', 'compose2.yml', 'ps'])
 
-        self.assertNotIn('multiplecomposefiles_simple_1', result.stdout)
-        self.assertNotIn('multiplecomposefiles_another_1', result.stdout)
-        self.assertIn('multiplecomposefiles_yetanother_1', result.stdout)
+        assert 'multiplecomposefiles_simple_1' not in result.stdout
+        assert 'multiplecomposefiles_another_1' not in result.stdout
+        assert 'multiplecomposefiles_yetanother_1' in result.stdout
+
+    def test_ps_services_filter_option(self):
+        self.base_dir = 'tests/fixtures/ps-services-filter'
+        image = self.dispatch(['ps', '--services', '--filter', 'source=image'])
+        build = self.dispatch(['ps', '--services', '--filter', 'source=build'])
+        all_services = self.dispatch(['ps', '--services'])
+
+        assert 'with_build' in all_services.stdout
+        assert 'with_image' in all_services.stdout
+        assert 'with_build' in build.stdout
+        assert 'with_build' not in image.stdout
+        assert 'with_image' in image.stdout
+        assert 'with_image' not in build.stdout
+
+    def test_ps_services_filter_status(self):
+        self.base_dir = 'tests/fixtures/ps-services-filter'
+        self.dispatch(['up', '-d'])
+        self.dispatch(['pause', 'with_image'])
+        paused = self.dispatch(['ps', '--services', '--filter', 'status=paused'])
+        stopped = self.dispatch(['ps', '--services', '--filter', 'status=stopped'])
+        running = self.dispatch(['ps', '--services', '--filter', 'status=running'])
+
+        assert 'with_build' not in stopped.stdout
+        assert 'with_image' not in stopped.stdout
+        assert 'with_build' not in paused.stdout
+        assert 'with_image' in paused.stdout
+        assert 'with_build' in running.stdout
+        assert 'with_image' in running.stdout
 
     def test_pull(self):
         result = self.dispatch(['pull'])
@@ -518,23 +546,26 @@ class CLITestCase(DockerClientTestCase):
                 'image library/nonexisting-image:latest not found' in result.stderr or
                 'pull access denied for nonexisting-image' in result.stderr)
 
+    def test_pull_with_quiet(self):
+        assert self.dispatch(['pull', '--quiet']).stderr == ''
+        assert self.dispatch(['pull', '--quiet']).stdout == ''
+
     def test_pull_with_parallel_failure(self):
         result = self.dispatch([
             '-f', 'ignore-pull-failures.yml', 'pull', '--parallel'],
             returncode=1
         )
 
-        self.assertRegexpMatches(result.stderr, re.compile('^Pulling simple', re.MULTILINE))
-        self.assertRegexpMatches(result.stderr, re.compile('^Pulling another', re.MULTILINE))
-        self.assertRegexpMatches(result.stderr,
-                                 re.compile('^ERROR: for another .*does not exist.*', re.MULTILINE))
-        self.assertRegexpMatches(result.stderr,
-                                 re.compile('''^(ERROR: )?(b')?.* nonexisting-image''',
-                                            re.MULTILINE))
-
-    def test_pull_with_quiet(self):
-        assert self.dispatch(['pull', '--quiet']).stderr == ''
-        assert self.dispatch(['pull', '--quiet']).stdout == ''
+        assert re.search(re.compile('^Pulling simple', re.MULTILINE), result.stderr)
+        assert re.search(re.compile('^Pulling another', re.MULTILINE), result.stderr)
+        assert re.search(
+            re.compile('^ERROR: for another .*does not exist.*', re.MULTILINE),
+            result.stderr
+        )
+        assert re.search(
+            re.compile('''^(ERROR: )?(b')?.* nonexisting-image''', re.MULTILINE),
+            result.stderr
+        )
 
     def test_build_plain(self):
         self.base_dir = 'tests/fixtures/simple-dockerfile'
@@ -573,7 +604,6 @@ class CLITestCase(DockerClientTestCase):
         assert BUILD_CACHE_TEXT not in result.stdout
         assert BUILD_PULL_TEXT in result.stdout
 
-    @pytest.mark.xfail(reason='17.10.0 RC bug remove after GA https://github.com/moby/moby/issues/35116')
     def test_build_failed(self):
         self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
         self.dispatch(['build', 'simple'], returncode=1)
@@ -587,7 +617,6 @@ class CLITestCase(DockerClientTestCase):
         ]
         assert len(containers) == 1
 
-    @pytest.mark.xfail(reason='17.10.0 RC bug remove after GA https://github.com/moby/moby/issues/35116')
     def test_build_failed_forcerm(self):
         self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
         self.dispatch(['build', '--force-rm', 'simple'], returncode=1)
@@ -809,36 +838,36 @@ class CLITestCase(DockerClientTestCase):
     def test_down_timeout(self):
         self.dispatch(['up', '-d'], None)
         service = self.project.get_service('simple')
-        self.assertEqual(len(service.containers()), 1)
-        self.assertTrue(service.containers()[0].is_running)
+        assert len(service.containers()) == 1
+        assert service.containers()[0].is_running
         ""
 
         self.dispatch(['down', '-t', '1'], None)
 
-        self.assertEqual(len(service.containers(stopped=True)), 0)
+        assert len(service.containers(stopped=True)) == 0
 
     def test_down_signal(self):
         self.base_dir = 'tests/fixtures/stop-signal-composefile'
         self.dispatch(['up', '-d'], None)
         service = self.project.get_service('simple')
-        self.assertEqual(len(service.containers()), 1)
-        self.assertTrue(service.containers()[0].is_running)
+        assert len(service.containers()) == 1
+        assert service.containers()[0].is_running
 
         self.dispatch(['down', '-t', '1'], None)
-        self.assertEqual(len(service.containers(stopped=True)), 0)
+        assert len(service.containers(stopped=True)) == 0
 
     def test_up_detached(self):
         self.dispatch(['up', '-d'])
         service = self.project.get_service('simple')
         another = self.project.get_service('another')
-        self.assertEqual(len(service.containers()), 1)
-        self.assertEqual(len(another.containers()), 1)
+        assert len(service.containers()) == 1
+        assert len(another.containers()) == 1
 
         # Ensure containers don't have stdin and stdout connected in -d mode
         container, = service.containers()
-        self.assertFalse(container.get('Config.AttachStderr'))
-        self.assertFalse(container.get('Config.AttachStdout'))
-        self.assertFalse(container.get('Config.AttachStdin'))
+        assert not container.get('Config.AttachStderr')
+        assert not container.get('Config.AttachStdout')
+        assert not container.get('Config.AttachStdin')
 
     def test_up_attached(self):
         self.base_dir = 'tests/fixtures/echo-services'
@@ -858,7 +887,7 @@ class CLITestCase(DockerClientTestCase):
 
         network_name = self.project.networks.networks['default'].full_name
         networks = self.client.networks(names=[network_name])
-        self.assertEqual(len(networks), 1)
+        assert len(networks) == 1
         assert networks[0]['Driver'] == 'bridge' if not is_cluster(self.client) else 'overlay'
         assert 'com.docker.network.bridge.enable_icc' not in networks[0]['Options']
 
@@ -866,17 +895,17 @@ class CLITestCase(DockerClientTestCase):
 
         for service in services:
             containers = service.containers()
-            self.assertEqual(len(containers), 1)
+            assert len(containers) == 1
 
             container = containers[0]
-            self.assertIn(container.id, network['Containers'])
+            assert container.id in network['Containers']
 
             networks = container.get('NetworkSettings.Networks')
-            self.assertEqual(list(networks), [network['Name']])
+            assert list(networks) == [network['Name']]
 
-            self.assertEqual(
-                sorted(networks[network['Name']]['Aliases']),
-                sorted([service.name, container.short_id]))
+            assert sorted(networks[network['Name']]['Aliases']) == sorted(
+                [service.name, container.short_id]
+            )
 
             for service in services:
                 assert self.lookup(container, service.name)
@@ -1213,13 +1242,13 @@ class CLITestCase(DockerClientTestCase):
         console = self.project.get_service('console')
 
         # console was not started
-        self.assertEqual(len(web.containers()), 1)
-        self.assertEqual(len(db.containers()), 1)
-        self.assertEqual(len(console.containers()), 0)
+        assert len(web.containers()) == 1
+        assert len(db.containers()) == 1
+        assert len(console.containers()) == 0
 
         # web has links
         web_container = web.containers()[0]
-        self.assertTrue(web_container.get('HostConfig.Links'))
+        assert web_container.get('HostConfig.Links')
 
     def test_up_with_net_is_invalid(self):
         self.base_dir = 'tests/fixtures/net-container'
@@ -1241,8 +1270,9 @@ class CLITestCase(DockerClientTestCase):
         foo = self.project.get_service('foo')
         foo_container = foo.containers()[0]
 
-        assert foo_container.get('HostConfig.NetworkMode') == \
-            'container:{}'.format(bar_container.id)
+        assert foo_container.get('HostConfig.NetworkMode') == 'container:{}'.format(
+            bar_container.id
+        )
 
     @v3_only()
     def test_up_with_healthcheck(self):
@@ -1294,46 +1324,55 @@ class CLITestCase(DockerClientTestCase):
         web = self.project.get_service('web')
         db = self.project.get_service('db')
         console = self.project.get_service('console')
-        self.assertEqual(len(web.containers()), 1)
-        self.assertEqual(len(db.containers()), 0)
-        self.assertEqual(len(console.containers()), 0)
+        assert len(web.containers()) == 1
+        assert len(db.containers()) == 0
+        assert len(console.containers()) == 0
 
     def test_up_with_force_recreate(self):
         self.dispatch(['up', '-d'], None)
         service = self.project.get_service('simple')
-        self.assertEqual(len(service.containers()), 1)
+        assert len(service.containers()) == 1
 
         old_ids = [c.id for c in service.containers()]
 
         self.dispatch(['up', '-d', '--force-recreate'], None)
-        self.assertEqual(len(service.containers()), 1)
+        assert len(service.containers()) == 1
 
         new_ids = [c.id for c in service.containers()]
 
-        self.assertNotEqual(old_ids, new_ids)
+        assert old_ids != new_ids
 
     def test_up_with_no_recreate(self):
         self.dispatch(['up', '-d'], None)
         service = self.project.get_service('simple')
-        self.assertEqual(len(service.containers()), 1)
+        assert len(service.containers()) == 1
 
         old_ids = [c.id for c in service.containers()]
 
         self.dispatch(['up', '-d', '--no-recreate'], None)
-        self.assertEqual(len(service.containers()), 1)
+        assert len(service.containers()) == 1
 
         new_ids = [c.id for c in service.containers()]
 
-        self.assertEqual(old_ids, new_ids)
+        assert old_ids == new_ids
 
     def test_up_with_force_recreate_and_no_recreate(self):
         self.dispatch(
             ['up', '-d', '--force-recreate', '--no-recreate'],
             returncode=1)
 
-    def test_up_with_timeout_detached(self):
-        result = self.dispatch(['up', '-d', '-t', '1'], returncode=1)
-        assert "-d and --timeout cannot be combined." in result.stderr
+    def test_up_with_timeout(self):
+        self.dispatch(['up', '-d', '-t', '1'])
+        service = self.project.get_service('simple')
+        another = self.project.get_service('another')
+        assert len(service.containers()) == 1
+        assert len(another.containers()) == 1
+
+    @mock.patch.dict(os.environ)
+    def test_up_with_ignore_remove_orphans(self):
+        os.environ["COMPOSE_IGNORE_ORPHANS"] = "True"
+        result = self.dispatch(['up', '-d', '--remove-orphans'], returncode=1)
+        assert "COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined." in result.stderr
 
     def test_up_handles_sigint(self):
         proc = start_process(self.base_dir, ['up', '-t', '2'])
@@ -1365,14 +1404,14 @@ class CLITestCase(DockerClientTestCase):
         proc = start_process(self.base_dir, ['up', '--abort-on-container-exit'])
         wait_on_condition(ContainerCountCondition(self.project, 0))
         proc.wait()
-        self.assertEqual(proc.returncode, 0)
+        assert proc.returncode == 0
 
     def test_up_handles_abort_on_container_exit_code(self):
         self.base_dir = 'tests/fixtures/abort-on-container-exit-1'
         proc = start_process(self.base_dir, ['up', '--abort-on-container-exit'])
         wait_on_condition(ContainerCountCondition(self.project, 0))
         proc.wait()
-        self.assertEqual(proc.returncode, 1)
+        assert proc.returncode == 1
 
     @v2_only()
     @no_cluster('Container PID mode does not work across clusters')
@@ -1403,27 +1442,27 @@ class CLITestCase(DockerClientTestCase):
     def test_exec_without_tty(self):
         self.base_dir = 'tests/fixtures/links-composefile'
         self.dispatch(['up', '-d', 'console'])
-        self.assertEqual(len(self.project.containers()), 1)
+        assert len(self.project.containers()) == 1
 
         stdout, stderr = self.dispatch(['exec', '-T', 'console', 'ls', '-1d', '/'])
-        self.assertEqual(stderr, "")
-        self.assertEqual(stdout, "/\n")
+        assert stderr == ""
+        assert stdout == "/\n"
 
     def test_exec_custom_user(self):
         self.base_dir = 'tests/fixtures/links-composefile'
         self.dispatch(['up', '-d', 'console'])
-        self.assertEqual(len(self.project.containers()), 1)
+        assert len(self.project.containers()) == 1
 
         stdout, stderr = self.dispatch(['exec', '-T', '--user=operator', 'console', 'whoami'])
-        self.assertEqual(stdout, "operator\n")
-        self.assertEqual(stderr, "")
+        assert stdout == "operator\n"
+        assert stderr == ""
 
     @v2_2_only()
     def test_exec_service_with_environment_overridden(self):
         name = 'service'
         self.base_dir = 'tests/fixtures/environment-exec'
         self.dispatch(['up', '-d'])
-        self.assertEqual(len(self.project.containers()), 1)
+        assert len(self.project.containers()) == 1
 
         stdout, stderr = self.dispatch([
             'exec',
@@ -1441,27 +1480,27 @@ class CLITestCase(DockerClientTestCase):
         # added option from command line
         assert 'alpha=beta' in stdout
 
-        self.assertEqual(stderr, '')
+        assert stderr == ''
 
     def test_run_service_without_links(self):
         self.base_dir = 'tests/fixtures/links-composefile'
         self.dispatch(['run', 'console', '/bin/true'])
-        self.assertEqual(len(self.project.containers()), 0)
+        assert len(self.project.containers()) == 0
 
         # Ensure stdin/out was open
         container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
         config = container.inspect()['Config']
-        self.assertTrue(config['AttachStderr'])
-        self.assertTrue(config['AttachStdout'])
-        self.assertTrue(config['AttachStdin'])
+        assert config['AttachStderr']
+        assert config['AttachStdout']
+        assert config['AttachStdin']
 
     def test_run_service_with_links(self):
         self.base_dir = 'tests/fixtures/links-composefile'
         self.dispatch(['run', 'web', '/bin/true'], None)
         db = self.project.get_service('db')
         console = self.project.get_service('console')
-        self.assertEqual(len(db.containers()), 1)
-        self.assertEqual(len(console.containers()), 0)
+        assert len(db.containers()) == 1
+        assert len(console.containers()) == 0
 
     @v2_only()
     def test_run_service_with_dependencies(self):
@@ -1469,8 +1508,8 @@ class CLITestCase(DockerClientTestCase):
         self.dispatch(['run', 'web', '/bin/true'], None)
         db = self.project.get_service('db')
         console = self.project.get_service('console')
-        self.assertEqual(len(db.containers()), 1)
-        self.assertEqual(len(console.containers()), 0)
+        assert len(db.containers()) == 1
+        assert len(console.containers()) == 0
 
     def test_run_service_with_scaled_dependencies(self):
         self.base_dir = 'tests/fixtures/v2-dependencies'
@@ -1487,22 +1526,22 @@ class CLITestCase(DockerClientTestCase):
         self.base_dir = 'tests/fixtures/links-composefile'
         self.dispatch(['run', '--no-deps', 'web', '/bin/true'])
         db = self.project.get_service('db')
-        self.assertEqual(len(db.containers()), 0)
+        assert len(db.containers()) == 0
 
     def test_run_does_not_recreate_linked_containers(self):
         self.base_dir = 'tests/fixtures/links-composefile'
         self.dispatch(['up', '-d', 'db'])
         db = self.project.get_service('db')
-        self.assertEqual(len(db.containers()), 1)
+        assert len(db.containers()) == 1
 
         old_ids = [c.id for c in db.containers()]
 
         self.dispatch(['run', 'web', '/bin/true'], None)
-        self.assertEqual(len(db.containers()), 1)
+        assert len(db.containers()) == 1
 
         new_ids = [c.id for c in db.containers()]
 
-        self.assertEqual(old_ids, new_ids)
+        assert old_ids == new_ids
 
     def test_run_without_command(self):
         self.base_dir = 'tests/fixtures/commands-composefile'
@@ -1511,18 +1550,12 @@ class CLITestCase(DockerClientTestCase):
         self.dispatch(['run', 'implicit'])
         service = self.project.get_service('implicit')
         containers = service.containers(stopped=True, one_off=OneOffFilter.only)
-        self.assertEqual(
-            [c.human_readable_command for c in containers],
-            [u'/bin/sh -c echo "success"'],
-        )
+        assert [c.human_readable_command for c in containers] == [u'/bin/sh -c echo "success"']
 
         self.dispatch(['run', 'explicit'])
         service = self.project.get_service('explicit')
         containers = service.containers(stopped=True, one_off=OneOffFilter.only)
-        self.assertEqual(
-            [c.human_readable_command for c in containers],
-            [u'/bin/true'],
-        )
+        assert [c.human_readable_command for c in containers] == [u'/bin/true']
 
     @pytest.mark.skipif(SWARM_SKIP_RM_VOLUMES, reason='Swarm DELETE /containers/<id> bug')
     def test_run_rm(self):
@@ -1534,7 +1567,7 @@ class CLITestCase(DockerClientTestCase):
             'running'))
         service = self.project.get_service('test')
         containers = service.containers(one_off=OneOffFilter.only)
-        self.assertEqual(len(containers), 1)
+        assert len(containers) == 1
         mounts = containers[0].get('Mounts')
         for mount in mounts:
             if mount['Destination'] == '/container-path':
@@ -1543,7 +1576,7 @@ class CLITestCase(DockerClientTestCase):
         os.kill(proc.pid, signal.SIGINT)
         wait_on_process(proc, 1)
 
-        self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
+        assert len(service.containers(stopped=True, one_off=OneOffFilter.only)) == 0
 
         volumes = self.client.volumes()['Volumes']
         assert volumes is not None
@@ -1611,7 +1644,7 @@ class CLITestCase(DockerClientTestCase):
         self.dispatch(['run', '--user={user}'.format(user=user), name], returncode=1)
         service = self.project.get_service(name)
         container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
-        self.assertEqual(user, container.get('Config.User'))
+        assert user == container.get('Config.User')
 
     def test_run_service_with_user_overridden_short_form(self):
         self.base_dir = 'tests/fixtures/user-composefile'
@@ -1620,7 +1653,7 @@ class CLITestCase(DockerClientTestCase):
         self.dispatch(['run', '-u', user, name], returncode=1)
         service = self.project.get_service(name)
         container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
-        self.assertEqual(user, container.get('Config.User'))
+        assert user == container.get('Config.User')
 
     def test_run_service_with_environment_overridden(self):
         name = 'service'
@@ -1635,13 +1668,13 @@ class CLITestCase(DockerClientTestCase):
         service = self.project.get_service(name)
         container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
         # env overridden
-        self.assertEqual('notbar', container.environment['foo'])
+        assert 'notbar' == container.environment['foo']
         # keep environment from yaml
-        self.assertEqual('world', container.environment['hello'])
+        assert 'world' == container.environment['hello']
         # added option from command line
-        self.assertEqual('beta', container.environment['alpha'])
+        assert 'beta' == container.environment['alpha']
         # make sure a value with a = don't crash out
-        self.assertEqual('moto=bobo', container.environment['allo'])
+        assert 'moto=bobo' == container.environment['allo']
 
     def test_run_service_without_map_ports(self):
         # create one off container
@@ -1657,8 +1690,8 @@ class CLITestCase(DockerClientTestCase):
         container.stop()
 
         # check the ports
-        self.assertEqual(port_random, None)
-        self.assertEqual(port_assigned, None)
+        assert port_random is None
+        assert port_assigned is None
 
     def test_run_service_with_map_ports(self):
         # create one off container
@@ -1716,8 +1749,8 @@ class CLITestCase(DockerClientTestCase):
         container.stop()
 
         # check the ports
-        self.assertEqual(port_short, "127.0.0.1:30000")
-        self.assertEqual(port_full, "127.0.0.1:30001")
+        assert port_short == "127.0.0.1:30000"
+        assert port_full == "127.0.0.1:30001"
 
     def test_run_with_expose_ports(self):
         # create one off container
@@ -1726,7 +1759,7 @@ class CLITestCase(DockerClientTestCase):
         container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
 
         ports = container.ports
-        self.assertEqual(len(ports), 9)
+        assert len(ports) == 9
         # exposed ports are not mapped to host ports
         assert ports['3000/tcp'] is None
         assert ports['3001/tcp'] is None
@@ -1748,7 +1781,7 @@ class CLITestCase(DockerClientTestCase):
 
         service = self.project.get_service('service')
         container, = service.containers(stopped=True, one_off=OneOffFilter.only)
-        self.assertEqual(container.name, name)
+        assert container.name == name
 
     def test_run_service_with_workdir_overridden(self):
         self.base_dir = 'tests/fixtures/run-workdir'
@@ -1757,7 +1790,7 @@ class CLITestCase(DockerClientTestCase):
         self.dispatch(['run', '--workdir={workdir}'.format(workdir=workdir), name])
         service = self.project.get_service(name)
         container = service.containers(stopped=True, one_off=True)[0]
-        self.assertEqual(workdir, container.get('Config.WorkingDir'))
+        assert workdir == container.get('Config.WorkingDir')
 
     def test_run_service_with_workdir_overridden_short_form(self):
         self.base_dir = 'tests/fixtures/run-workdir'
@@ -1766,7 +1799,7 @@ class CLITestCase(DockerClientTestCase):
         self.dispatch(['run', '-w', workdir, name])
         service = self.project.get_service(name)
         container = service.containers(stopped=True, one_off=True)[0]
-        self.assertEqual(workdir, container.get('Config.WorkingDir'))
+        assert workdir == container.get('Config.WorkingDir')
 
     @v2_only()
     def test_run_interactive_connects_to_network(self):
@@ -1854,7 +1887,7 @@ class CLITestCase(DockerClientTestCase):
         result = self.dispatch(['run', 'simple'])
 
         if six.PY2:  # Can't retrieve output on Py3. See issue #3670
-            assert value == result.stdout.strip()
+            assert value in result.stdout.strip()
 
         container = self.project.containers(one_off=OneOffFilter.only, stopped=True)[0]
         environment = container.get('Config.Env')
@@ -1887,19 +1920,19 @@ class CLITestCase(DockerClientTestCase):
         service = self.project.get_service('simple')
         service.create_container()
         kill_service(service)
-        self.assertEqual(len(service.containers(stopped=True)), 1)
+        assert len(service.containers(stopped=True)) == 1
         self.dispatch(['rm', '--force'], None)
-        self.assertEqual(len(service.containers(stopped=True)), 0)
+        assert len(service.containers(stopped=True)) == 0
         service = self.project.get_service('simple')
         service.create_container()
         kill_service(service)
-        self.assertEqual(len(service.containers(stopped=True)), 1)
+        assert len(service.containers(stopped=True)) == 1
         self.dispatch(['rm', '-f'], None)
-        self.assertEqual(len(service.containers(stopped=True)), 0)
+        assert len(service.containers(stopped=True)) == 0
         service = self.project.get_service('simple')
         service.create_container()
         self.dispatch(['rm', '-fs'], None)
-        self.assertEqual(len(service.containers(stopped=True)), 0)
+        assert len(service.containers(stopped=True)) == 0
 
     def test_rm_stop(self):
         self.dispatch(['up', '-d'], None)
@@ -1923,43 +1956,43 @@ class CLITestCase(DockerClientTestCase):
         service.create_container(one_off=False)
         service.create_container(one_off=True)
         kill_service(service)
-        self.assertEqual(len(service.containers(stopped=True)), 1)
-        self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
+        assert len(service.containers(stopped=True)) == 1
+        assert len(service.containers(stopped=True, one_off=OneOffFilter.only)) == 1
         self.dispatch(['rm', '-f'], None)
-        self.assertEqual(len(service.containers(stopped=True)), 0)
-        self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
+        assert len(service.containers(stopped=True)) == 0
+        assert len(service.containers(stopped=True, one_off=OneOffFilter.only)) == 0
 
         service.create_container(one_off=False)
         service.create_container(one_off=True)
         kill_service(service)
-        self.assertEqual(len(service.containers(stopped=True)), 1)
-        self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
+        assert len(service.containers(stopped=True)) == 1
+        assert len(service.containers(stopped=True, one_off=OneOffFilter.only)) == 1
         self.dispatch(['rm', '-f', '--all'], None)
-        self.assertEqual(len(service.containers(stopped=True)), 0)
-        self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
+        assert len(service.containers(stopped=True)) == 0
+        assert len(service.containers(stopped=True, one_off=OneOffFilter.only)) == 0
 
     def test_stop(self):
         self.dispatch(['up', '-d'], None)
         service = self.project.get_service('simple')
-        self.assertEqual(len(service.containers()), 1)
-        self.assertTrue(service.containers()[0].is_running)
+        assert len(service.containers()) == 1
+        assert service.containers()[0].is_running
 
         self.dispatch(['stop', '-t', '1'], None)
 
-        self.assertEqual(len(service.containers(stopped=True)), 1)
-        self.assertFalse(service.containers(stopped=True)[0].is_running)
+        assert len(service.containers(stopped=True)) == 1
+        assert not service.containers(stopped=True)[0].is_running
 
     def test_stop_signal(self):
         self.base_dir = 'tests/fixtures/stop-signal-composefile'
         self.dispatch(['up', '-d'], None)
         service = self.project.get_service('simple')
-        self.assertEqual(len(service.containers()), 1)
-        self.assertTrue(service.containers()[0].is_running)
+        assert len(service.containers()) == 1
+        assert service.containers()[0].is_running
 
         self.dispatch(['stop', '-t', '1'], None)
-        self.assertEqual(len(service.containers(stopped=True)), 1)
-        self.assertFalse(service.containers(stopped=True)[0].is_running)
-        self.assertEqual(service.containers(stopped=True)[0].exit_code, 0)
+        assert len(service.containers(stopped=True)) == 1
+        assert not service.containers(stopped=True)[0].is_running
+        assert service.containers(stopped=True)[0].exit_code == 0
 
     def test_start_no_containers(self):
         result = self.dispatch(['start'], returncode=1)
@@ -1971,39 +2004,39 @@ class CLITestCase(DockerClientTestCase):
         self.dispatch(['up', '-d'])
         simple = self.project.get_service('simple').containers()[0]
         log_config = simple.get('HostConfig.LogConfig')
-        self.assertTrue(log_config)
-        self.assertEqual(log_config.get('Type'), 'none')
+        assert log_config
+        assert log_config.get('Type') == 'none'
 
         another = self.project.get_service('another').containers()[0]
         log_config = another.get('HostConfig.LogConfig')
-        self.assertTrue(log_config)
-        self.assertEqual(log_config.get('Type'), 'json-file')
-        self.assertEqual(log_config.get('Config')['max-size'], '10m')
+        assert log_config
+        assert log_config.get('Type') == 'json-file'
+        assert log_config.get('Config')['max-size'] == '10m'
 
     def test_up_logging_legacy(self):
         self.base_dir = 'tests/fixtures/logging-composefile-legacy'
         self.dispatch(['up', '-d'])
         simple = self.project.get_service('simple').containers()[0]
         log_config = simple.get('HostConfig.LogConfig')
-        self.assertTrue(log_config)
-        self.assertEqual(log_config.get('Type'), 'none')
+        assert log_config
+        assert log_config.get('Type') == 'none'
 
         another = self.project.get_service('another').containers()[0]
         log_config = another.get('HostConfig.LogConfig')
-        self.assertTrue(log_config)
-        self.assertEqual(log_config.get('Type'), 'json-file')
-        self.assertEqual(log_config.get('Config')['max-size'], '10m')
+        assert log_config
+        assert log_config.get('Type') == 'json-file'
+        assert log_config.get('Config')['max-size'] == '10m'
 
     def test_pause_unpause(self):
         self.dispatch(['up', '-d'], None)
         service = self.project.get_service('simple')
-        self.assertFalse(service.containers()[0].is_paused)
+        assert not service.containers()[0].is_paused
 
         self.dispatch(['pause'], None)
-        self.assertTrue(service.containers()[0].is_paused)
+        assert service.containers()[0].is_paused
 
         self.dispatch(['unpause'], None)
-        self.assertFalse(service.containers()[0].is_paused)
+        assert not service.containers()[0].is_paused
 
     def test_pause_no_containers(self):
         result = self.dispatch(['pause'], returncode=1)
@@ -2077,7 +2110,7 @@ class CLITestCase(DockerClientTestCase):
         self.dispatch(['up', '-d'])
 
         result = self.dispatch(['logs', '-f', '-t'])
-        self.assertRegexpMatches(result.stdout, '(\d{4})-(\d{2})-(\d{2})T(\d{2})\:(\d{2})\:(\d{2})')
+        assert re.search('(\d{4})-(\d{2})-(\d{2})T(\d{2})\:(\d{2})\:(\d{2})', result.stdout)
 
     def test_logs_tail(self):
         self.base_dir = 'tests/fixtures/logs-tail-composefile'
@@ -2092,36 +2125,36 @@ class CLITestCase(DockerClientTestCase):
     def test_kill(self):
         self.dispatch(['up', '-d'], None)
         service = self.project.get_service('simple')
-        self.assertEqual(len(service.containers()), 1)
-        self.assertTrue(service.containers()[0].is_running)
+        assert len(service.containers()) == 1
+        assert service.containers()[0].is_running
 
         self.dispatch(['kill'], None)
 
-        self.assertEqual(len(service.containers(stopped=True)), 1)
-        self.assertFalse(service.containers(stopped=True)[0].is_running)
+        assert len(service.containers(stopped=True)) == 1
+        assert not service.containers(stopped=True)[0].is_running
 
     def test_kill_signal_sigstop(self):
         self.dispatch(['up', '-d'], None)
         service = self.project.get_service('simple')
-        self.assertEqual(len(service.containers()), 1)
-        self.assertTrue(service.containers()[0].is_running)
+        assert len(service.containers()) == 1
+        assert service.containers()[0].is_running
 
         self.dispatch(['kill', '-s', 'SIGSTOP'], None)
 
-        self.assertEqual(len(service.containers()), 1)
+        assert len(service.containers()) == 1
         # The container is still running. It has only been paused
-        self.assertTrue(service.containers()[0].is_running)
+        assert service.containers()[0].is_running
 
     def test_kill_stopped_service(self):
         self.dispatch(['up', '-d'], None)
         service = self.project.get_service('simple')
         self.dispatch(['kill', '-s', 'SIGSTOP'], None)
-        self.assertTrue(service.containers()[0].is_running)
+        assert service.containers()[0].is_running
 
         self.dispatch(['kill', '-s', 'SIGKILL'], None)
 
-        self.assertEqual(len(service.containers(stopped=True)), 1)
-        self.assertFalse(service.containers(stopped=True)[0].is_running)
+        assert len(service.containers(stopped=True)) == 1
+        assert not service.containers(stopped=True)[0].is_running
 
     def test_restart(self):
         service = self.project.get_service('simple')
@@ -2130,23 +2163,17 @@ class CLITestCase(DockerClientTestCase):
         started_at = container.dictionary['State']['StartedAt']
         self.dispatch(['restart', '-t', '1'], None)
         container.inspect()
-        self.assertNotEqual(
-            container.dictionary['State']['FinishedAt'],
-            '0001-01-01T00:00:00Z',
-        )
-        self.assertNotEqual(
-            container.dictionary['State']['StartedAt'],
-            started_at,
-        )
+        assert container.dictionary['State']['FinishedAt'] != '0001-01-01T00:00:00Z'
+        assert container.dictionary['State']['StartedAt'] != started_at
 
     def test_restart_stopped_container(self):
         service = self.project.get_service('simple')
         container = service.create_container()
         container.start()
         container.kill()
-        self.assertEqual(len(service.containers(stopped=True)), 1)
+        assert len(service.containers(stopped=True)) == 1
         self.dispatch(['restart', '-t', '1'], None)
-        self.assertEqual(len(service.containers(stopped=False)), 1)
+        assert len(service.containers(stopped=False)) == 1
 
     def test_restart_no_containers(self):
         result = self.dispatch(['restart'], returncode=1)
@@ -2156,23 +2183,23 @@ class CLITestCase(DockerClientTestCase):
         project = self.project
 
         self.dispatch(['scale', 'simple=1'])
-        self.assertEqual(len(project.get_service('simple').containers()), 1)
+        assert len(project.get_service('simple').containers()) == 1
 
         self.dispatch(['scale', 'simple=3', 'another=2'])
-        self.assertEqual(len(project.get_service('simple').containers()), 3)
-        self.assertEqual(len(project.get_service('another').containers()), 2)
+        assert len(project.get_service('simple').containers()) == 3
+        assert len(project.get_service('another').containers()) == 2
 
         self.dispatch(['scale', 'simple=1', 'another=1'])
-        self.assertEqual(len(project.get_service('simple').containers()), 1)
-        self.assertEqual(len(project.get_service('another').containers()), 1)
+        assert len(project.get_service('simple').containers()) == 1
+        assert len(project.get_service('another').containers()) == 1
 
         self.dispatch(['scale', 'simple=1', 'another=1'])
-        self.assertEqual(len(project.get_service('simple').containers()), 1)
-        self.assertEqual(len(project.get_service('another').containers()), 1)
+        assert len(project.get_service('simple').containers()) == 1
+        assert len(project.get_service('another').containers()) == 1
 
         self.dispatch(['scale', 'simple=0', 'another=0'])
-        self.assertEqual(len(project.get_service('simple').containers()), 0)
-        self.assertEqual(len(project.get_service('another').containers()), 0)
+        assert len(project.get_service('simple').containers()) == 0
+        assert len(project.get_service('another').containers()) == 0
 
     def test_scale_v2_2(self):
         self.base_dir = 'tests/fixtures/scale'
@@ -2267,10 +2294,10 @@ class CLITestCase(DockerClientTestCase):
                 result = self.dispatch(['port', '--index=' + str(index), 'simple', str(number)])
             return result.stdout.rstrip()
 
-        self.assertEqual(get_port(3000), containers[0].get_local_port(3000))
-        self.assertEqual(get_port(3000, index=1), containers[0].get_local_port(3000))
-        self.assertEqual(get_port(3000, index=2), containers[1].get_local_port(3000))
-        self.assertEqual(get_port(3002), "")
+        assert get_port(3000) == containers[0].get_local_port(3000)
+        assert get_port(3000, index=1) == containers[0].get_local_port(3000)
+        assert get_port(3000, index=2) == containers[1].get_local_port(3000)
+        assert get_port(3002) == ""
 
     def test_events_json(self):
         events_proc = start_process(self.base_dir, ['events', '--json'])
@@ -2321,8 +2348,8 @@ class CLITestCase(DockerClientTestCase):
         self._project = get_project(self.base_dir, [config_path])
 
         containers = self.project.containers(stopped=True)
-        self.assertEqual(len(containers), 1)
-        self.assertIn("FOO=1", containers[0].get('Config.Env'))
+        assert len(containers) == 1
+        assert "FOO=1" in containers[0].get('Config.Env')
 
     @mock.patch.dict(os.environ)
     def test_home_and_env_var_in_volume_path(self):
@@ -2342,11 +2369,11 @@ class CLITestCase(DockerClientTestCase):
         self.dispatch(['up', '-d'], None)
 
         containers = self.project.containers()
-        self.assertEqual(len(containers), 2)
+        assert len(containers) == 2
 
         web, db = containers
-        self.assertEqual(web.human_readable_command, 'top')
-        self.assertEqual(db.human_readable_command, 'top')
+        assert web.human_readable_command == 'top'
+        assert db.human_readable_command == 'top'
 
     def test_up_with_multiple_files(self):
         self.base_dir = 'tests/fixtures/override-files'
@@ -2366,21 +2393,18 @@ class CLITestCase(DockerClientTestCase):
             None)
 
         containers = self.project.containers()
-        self.assertEqual(len(containers), 3)
+        assert len(containers) == 3
 
         web, other, db = containers
-        self.assertEqual(web.human_readable_command, 'top')
-        self.assertEqual(db.human_readable_command, 'top')
-        self.assertEqual(other.human_readable_command, 'top')
+        assert web.human_readable_command == 'top'
+        assert db.human_readable_command == 'top'
+        assert other.human_readable_command == 'top'
 
     def test_up_with_extends(self):
         self.base_dir = 'tests/fixtures/extends'
         self.dispatch(['up', '-d'], None)
 
-        self.assertEqual(
-            set([s.name for s in self.project.services]),
-            set(['mydb', 'myweb']),
-        )
+        assert set([s.name for s in self.project.services]) == set(['mydb', 'myweb'])
 
         # Sort by name so we get [db, web]
         containers = sorted(
@@ -2388,19 +2412,17 @@ class CLITestCase(DockerClientTestCase):
             key=lambda c: c.name,
         )
 
-        self.assertEqual(len(containers), 2)
+        assert len(containers) == 2
         web = containers[1]
 
-        self.assertEqual(
-            set(get_links(web)),
-            set(['db', 'mydb_1', 'extends_mydb_1']))
+        assert set(get_links(web)) == set(['db', 'mydb_1', 'extends_mydb_1'])
 
         expected_env = set([
             "FOO=1",
             "BAR=2",
             "BAZ=2",
         ])
-        self.assertTrue(expected_env <= set(web.get('Config.Env')))
+        assert expected_env <= set(web.get('Config.Env'))
 
     def test_top_services_not_running(self):
         self.base_dir = 'tests/fixtures/top'
@@ -2412,9 +2434,9 @@ class CLITestCase(DockerClientTestCase):
         self.dispatch(['up', '-d'])
         result = self.dispatch(['top'])
 
-        self.assertIn('top_service_a', result.stdout)
-        self.assertIn('top_service_b', result.stdout)
-        self.assertNotIn('top_not_a_service', result.stdout)
+        assert 'top_service_a' in result.stdout
+        assert 'top_service_b' in result.stdout
+        assert 'top_not_a_service' not in result.stdout
 
     def test_top_processes_running(self):
         self.base_dir = 'tests/fixtures/top'
@@ -2473,14 +2495,14 @@ class CLITestCase(DockerClientTestCase):
         self.dispatch(['up', '-d'], None)
 
         containers = self.project.containers()
-        self.assertEqual(len(containers), 2)
+        assert len(containers) == 2
 
         web, db = containers
-        self.assertEqual(web.human_readable_command, 'sleep 100')
-        self.assertEqual(db.human_readable_command, 'top')
+        assert web.human_readable_command == 'sleep 100'
+        assert db.human_readable_command == 'top'
 
     def test_up_with_duplicate_override_yaml_files(self):
         self.base_dir = 'tests/fixtures/duplicate-override-yaml-files'
-        with self.assertRaises(DuplicateOverrideFileFound):
+        with pytest.raises(DuplicateOverrideFileFound):
             get_project(self.base_dir, [])
         self.base_dir = None

+ 6 - 0
tests/fixtures/ps-services-filter/docker-compose.yml

@@ -0,0 +1,6 @@
+with_image:
+  image: busybox:latest
+  command: top
+with_build:
+  build: ../build-ctx/
+  command: top

+ 0 - 0
tests/fixtures/tls/key.key → tests/fixtures/tls/key.pem


+ 260 - 118
tests/integration/project_test.py

@@ -22,6 +22,7 @@ from compose.config.types import VolumeSpec
 from compose.const import COMPOSEFILE_V2_0 as V2_0
 from compose.const import COMPOSEFILE_V2_1 as V2_1
 from compose.const import COMPOSEFILE_V2_2 as V2_2
+from compose.const import COMPOSEFILE_V2_3 as V2_3
 from compose.const import COMPOSEFILE_V3_1 as V3_1
 from compose.const import LABEL_PROJECT
 from compose.const import LABEL_SERVICE
@@ -31,6 +32,7 @@ from compose.errors import NoHealthCheckConfigured
 from compose.project import Project
 from compose.project import ProjectError
 from compose.service import ConvergenceStrategy
+from tests.integration.testcases import if_runtime_available
 from tests.integration.testcases import is_cluster
 from tests.integration.testcases import no_cluster
 from tests.integration.testcases import v2_1_only
@@ -61,7 +63,7 @@ class ProjectTest(DockerClientTestCase):
         project.up()
 
         containers = project.containers()
-        self.assertEqual(len(containers), 2)
+        assert len(containers) == 2
 
     @pytest.mark.skipif(SWARM_SKIP_CONTAINERS_ALL, reason='Swarm /containers/json bug')
     def test_containers_stopped(self):
@@ -85,9 +87,7 @@ class ProjectTest(DockerClientTestCase):
         project.up()
 
         containers = project.containers(['web'])
-        self.assertEqual(
-            [c.name for c in containers],
-            ['composetest_web_1'])
+        assert [c.name for c in containers] == ['composetest_web_1']
 
     def test_containers_with_extra_service(self):
         web = self.create_service('web')
@@ -99,10 +99,7 @@ class ProjectTest(DockerClientTestCase):
         self.create_service('extra').create_container()
 
         project = Project('composetest', [web, db], self.client)
-        self.assertEqual(
-            set(project.containers(stopped=True)),
-            set([web_1, db_1]),
-        )
+        assert set(project.containers(stopped=True)) == set([web_1, db_1])
 
     def test_volumes_from_service(self):
         project = Project.from_config(
@@ -121,7 +118,7 @@ class ProjectTest(DockerClientTestCase):
         )
         db = project.get_service('db')
         data = project.get_service('data')
-        self.assertEqual(db.volumes_from, [VolumeFromSpec(data, 'rw', 'service')])
+        assert db.volumes_from == [VolumeFromSpec(data, 'rw', 'service')]
 
     def test_volumes_from_container(self):
         data_container = Container.create(
@@ -143,7 +140,7 @@ class ProjectTest(DockerClientTestCase):
             client=self.client,
         )
         db = project.get_service('db')
-        self.assertEqual(db._get_volumes_from(), [data_container.id + ':rw'])
+        assert db._get_volumes_from() == [data_container.id + ':rw']
 
     @v2_only()
     @no_cluster('container networks not supported in Swarm')
@@ -171,7 +168,7 @@ class ProjectTest(DockerClientTestCase):
 
         web = project.get_service('web')
         net = project.get_service('net')
-        self.assertEqual(web.network_mode.mode, 'container:' + net.containers()[0].id)
+        assert web.network_mode.mode == 'container:' + net.containers()[0].id
 
     @v2_only()
     @no_cluster('container networks not supported in Swarm')
@@ -210,7 +207,7 @@ class ProjectTest(DockerClientTestCase):
         project.up()
 
         web = project.get_service('web')
-        self.assertEqual(web.network_mode.mode, 'container:' + net_container.id)
+        assert web.network_mode.mode == 'container:' + net_container.id
 
     @no_cluster('container networks not supported in Swarm')
     def test_net_from_service_v1(self):
@@ -234,7 +231,7 @@ class ProjectTest(DockerClientTestCase):
 
         web = project.get_service('web')
         net = project.get_service('net')
-        self.assertEqual(web.network_mode.mode, 'container:' + net.containers()[0].id)
+        assert web.network_mode.mode == 'container:' + net.containers()[0].id
 
     @no_cluster('container networks not supported in Swarm')
     def test_net_from_container_v1(self):
@@ -269,7 +266,7 @@ class ProjectTest(DockerClientTestCase):
         project.up()
 
         web = project.get_service('web')
-        self.assertEqual(web.network_mode.mode, 'container:' + net_container.id)
+        assert web.network_mode.mode == 'container:' + net_container.id
 
     def test_start_pause_unpause_stop_kill_remove(self):
         web = self.create_service('web')
@@ -278,53 +275,51 @@ class ProjectTest(DockerClientTestCase):
 
         project.start()
 
-        self.assertEqual(len(web.containers()), 0)
-        self.assertEqual(len(db.containers()), 0)
+        assert len(web.containers()) == 0
+        assert len(db.containers()) == 0
 
         web_container_1 = web.create_container()
         web_container_2 = web.create_container()
         db_container = db.create_container()
 
         project.start(service_names=['web'])
-        self.assertEqual(
-            set(c.name for c in project.containers() if c.is_running),
-            set([web_container_1.name, web_container_2.name]))
+        assert set(c.name for c in project.containers() if c.is_running) == set(
+            [web_container_1.name, web_container_2.name]
+        )
 
         project.start()
-        self.assertEqual(
-            set(c.name for c in project.containers() if c.is_running),
-            set([web_container_1.name, web_container_2.name, db_container.name]))
+        assert set(c.name for c in project.containers() if c.is_running) == set(
+            [web_container_1.name, web_container_2.name, db_container.name]
+        )
 
         project.pause(service_names=['web'])
-        self.assertEqual(
-            set([c.name for c in project.containers() if c.is_paused]),
-            set([web_container_1.name, web_container_2.name]))
+        assert set([c.name for c in project.containers() if c.is_paused]) == set(
+            [web_container_1.name, web_container_2.name]
+        )
 
         project.pause()
-        self.assertEqual(
-            set([c.name for c in project.containers() if c.is_paused]),
-            set([web_container_1.name, web_container_2.name, db_container.name]))
+        assert set([c.name for c in project.containers() if c.is_paused]) == set(
+            [web_container_1.name, web_container_2.name, db_container.name]
+        )
 
         project.unpause(service_names=['db'])
-        self.assertEqual(len([c.name for c in project.containers() if c.is_paused]), 2)
+        assert len([c.name for c in project.containers() if c.is_paused]) == 2
 
         project.unpause()
-        self.assertEqual(len([c.name for c in project.containers() if c.is_paused]), 0)
+        assert len([c.name for c in project.containers() if c.is_paused]) == 0
 
         project.stop(service_names=['web'], timeout=1)
-        self.assertEqual(
-            set(c.name for c in project.containers() if c.is_running), set([db_container.name])
-        )
+        assert set(c.name for c in project.containers() if c.is_running) == set([db_container.name])
 
         project.kill(service_names=['db'])
-        self.assertEqual(len([c for c in project.containers() if c.is_running]), 0)
-        self.assertEqual(len(project.containers(stopped=True)), 3)
+        assert len([c for c in project.containers() if c.is_running]) == 0
+        assert len(project.containers(stopped=True)) == 3
 
         project.remove_stopped(service_names=['web'])
-        self.assertEqual(len(project.containers(stopped=True)), 1)
+        assert len(project.containers(stopped=True)) == 1
 
         project.remove_stopped()
-        self.assertEqual(len(project.containers(stopped=True)), 0)
+        assert len(project.containers(stopped=True)) == 0
 
     def test_create(self):
         web = self.create_service('web')
@@ -399,43 +394,43 @@ class ProjectTest(DockerClientTestCase):
         db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
         project = Project('composetest', [web, db], self.client)
         project.start()
-        self.assertEqual(len(project.containers()), 0)
+        assert len(project.containers()) == 0
 
         project.up(['db'])
-        self.assertEqual(len(project.containers()), 1)
-        self.assertEqual(len(db.containers()), 1)
-        self.assertEqual(len(web.containers()), 0)
+        assert len(project.containers()) == 1
+        assert len(db.containers()) == 1
+        assert len(web.containers()) == 0
 
     def test_project_up_starts_uncreated_services(self):
         db = self.create_service('db')
         web = self.create_service('web', links=[(db, 'db')])
         project = Project('composetest', [db, web], self.client)
         project.up(['db'])
-        self.assertEqual(len(project.containers()), 1)
+        assert len(project.containers()) == 1
 
         project.up()
-        self.assertEqual(len(project.containers()), 2)
-        self.assertEqual(len(db.containers()), 1)
-        self.assertEqual(len(web.containers()), 1)
+        assert len(project.containers()) == 2
+        assert len(db.containers()) == 1
+        assert len(web.containers()) == 1
 
     def test_recreate_preserves_volumes(self):
         web = self.create_service('web')
         db = self.create_service('db', volumes=[VolumeSpec.parse('/etc')])
         project = Project('composetest', [web, db], self.client)
         project.start()
-        self.assertEqual(len(project.containers()), 0)
+        assert len(project.containers()) == 0
 
         project.up(['db'])
-        self.assertEqual(len(project.containers()), 1)
+        assert len(project.containers()) == 1
         old_db_id = project.containers()[0].id
         db_volume_path = project.containers()[0].get('Volumes./etc')
 
         project.up(strategy=ConvergenceStrategy.always)
-        self.assertEqual(len(project.containers()), 2)
+        assert len(project.containers()) == 2
 
         db_container = [c for c in project.containers() if 'db' in c.name][0]
-        self.assertNotEqual(db_container.id, old_db_id)
-        self.assertEqual(db_container.get('Volumes./etc'), db_volume_path)
+        assert db_container.id != old_db_id
+        assert db_container.get('Volumes./etc') == db_volume_path
 
     @v2_3_only()
     def test_recreate_preserves_mounts(self):
@@ -462,36 +457,34 @@ class ProjectTest(DockerClientTestCase):
         db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
         project = Project('composetest', [web, db], self.client)
         project.start()
-        self.assertEqual(len(project.containers()), 0)
+        assert len(project.containers()) == 0
 
         project.up(['db'])
-        self.assertEqual(len(project.containers()), 1)
+        assert len(project.containers()) == 1
         old_db_id = project.containers()[0].id
         container, = project.containers()
         db_volume_path = container.get_mount('/var/db')['Source']
 
         project.up(strategy=ConvergenceStrategy.never)
-        self.assertEqual(len(project.containers()), 2)
+        assert len(project.containers()) == 2
 
         db_container = [c for c in project.containers() if 'db' in c.name][0]
-        self.assertEqual(db_container.id, old_db_id)
-        self.assertEqual(
-            db_container.get_mount('/var/db')['Source'],
-            db_volume_path)
+        assert db_container.id == old_db_id
+        assert db_container.get_mount('/var/db')['Source'] == db_volume_path
 
     def test_project_up_with_no_recreate_stopped(self):
         web = self.create_service('web')
         db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
         project = Project('composetest', [web, db], self.client)
         project.start()
-        self.assertEqual(len(project.containers()), 0)
+        assert len(project.containers()) == 0
 
         project.up(['db'])
         project.kill()
 
         old_containers = project.containers(stopped=True)
 
-        self.assertEqual(len(old_containers), 1)
+        assert len(old_containers) == 1
         old_container, = old_containers
         old_db_id = old_container.id
         db_volume_path = old_container.get_mount('/var/db')['Source']
@@ -499,26 +492,24 @@ class ProjectTest(DockerClientTestCase):
         project.up(strategy=ConvergenceStrategy.never)
 
         new_containers = project.containers(stopped=True)
-        self.assertEqual(len(new_containers), 2)
-        self.assertEqual([c.is_running for c in new_containers], [True, True])
+        assert len(new_containers) == 2
+        assert [c.is_running for c in new_containers] == [True, True]
 
         db_container = [c for c in new_containers if 'db' in c.name][0]
-        self.assertEqual(db_container.id, old_db_id)
-        self.assertEqual(
-            db_container.get_mount('/var/db')['Source'],
-            db_volume_path)
+        assert db_container.id == old_db_id
+        assert db_container.get_mount('/var/db')['Source'] == db_volume_path
 
     def test_project_up_without_all_services(self):
         console = self.create_service('console')
         db = self.create_service('db')
         project = Project('composetest', [console, db], self.client)
         project.start()
-        self.assertEqual(len(project.containers()), 0)
+        assert len(project.containers()) == 0
 
         project.up()
-        self.assertEqual(len(project.containers()), 2)
-        self.assertEqual(len(db.containers()), 1)
-        self.assertEqual(len(console.containers()), 1)
+        assert len(project.containers()) == 2
+        assert len(db.containers()) == 1
+        assert len(console.containers()) == 1
 
     def test_project_up_starts_links(self):
         console = self.create_service('console')
@@ -527,13 +518,13 @@ class ProjectTest(DockerClientTestCase):
 
         project = Project('composetest', [web, db, console], self.client)
         project.start()
-        self.assertEqual(len(project.containers()), 0)
+        assert len(project.containers()) == 0
 
         project.up(['web'])
-        self.assertEqual(len(project.containers()), 2)
-        self.assertEqual(len(web.containers()), 1)
-        self.assertEqual(len(db.containers()), 1)
-        self.assertEqual(len(console.containers()), 0)
+        assert len(project.containers()) == 2
+        assert len(web.containers()) == 1
+        assert len(db.containers()) == 1
+        assert len(console.containers()) == 0
 
     def test_project_up_starts_depends(self):
         project = Project.from_config(
@@ -561,14 +552,14 @@ class ProjectTest(DockerClientTestCase):
             client=self.client,
         )
         project.start()
-        self.assertEqual(len(project.containers()), 0)
+        assert len(project.containers()) == 0
 
         project.up(['web'])
-        self.assertEqual(len(project.containers()), 3)
-        self.assertEqual(len(project.get_service('web').containers()), 1)
-        self.assertEqual(len(project.get_service('db').containers()), 1)
-        self.assertEqual(len(project.get_service('data').containers()), 1)
-        self.assertEqual(len(project.get_service('console').containers()), 0)
+        assert len(project.containers()) == 3
+        assert len(project.get_service('web').containers()) == 1
+        assert len(project.get_service('db').containers()) == 1
+        assert len(project.get_service('data').containers()) == 1
+        assert len(project.get_service('console').containers()) == 0
 
     def test_project_up_with_no_deps(self):
         project = Project.from_config(
@@ -596,15 +587,15 @@ class ProjectTest(DockerClientTestCase):
             client=self.client,
         )
         project.start()
-        self.assertEqual(len(project.containers()), 0)
+        assert len(project.containers()) == 0
 
         project.up(['db'], start_deps=False)
-        self.assertEqual(len(project.containers(stopped=True)), 2)
-        self.assertEqual(len(project.get_service('web').containers()), 0)
-        self.assertEqual(len(project.get_service('db').containers()), 1)
-        self.assertEqual(len(project.get_service('data').containers(stopped=True)), 1)
+        assert len(project.containers(stopped=True)) == 2
+        assert len(project.get_service('web').containers()) == 0
+        assert len(project.get_service('db').containers()) == 1
+        assert len(project.get_service('data').containers(stopped=True)) == 1
         assert not project.get_service('data').containers(stopped=True)[0].is_running
-        self.assertEqual(len(project.get_service('console').containers()), 0)
+        assert len(project.get_service('console').containers()) == 0
 
     def test_project_up_recreate_with_tmpfs_volume(self):
         # https://github.com/docker/compose/issues/4751
@@ -632,22 +623,22 @@ class ProjectTest(DockerClientTestCase):
 
         service = project.get_service('web')
         service.scale(1)
-        self.assertEqual(len(service.containers()), 1)
+        assert len(service.containers()) == 1
         service.scale(3)
-        self.assertEqual(len(service.containers()), 3)
+        assert len(service.containers()) == 3
         project.up()
         service = project.get_service('web')
-        self.assertEqual(len(service.containers()), 1)
+        assert len(service.containers()) == 1
         service.scale(1)
-        self.assertEqual(len(service.containers()), 1)
+        assert len(service.containers()) == 1
         project.up(scale_override={'web': 3})
         service = project.get_service('web')
-        self.assertEqual(len(service.containers()), 3)
+        assert len(service.containers()) == 3
         # does scale=0 ,makes any sense? after recreating at least 1 container is running
         service.scale(0)
         project.up()
         service = project.get_service('web')
-        self.assertEqual(len(service.containers()), 1)
+        assert len(service.containers()) == 1
 
     @v2_only()
     def test_project_up_networks(self):
@@ -832,11 +823,76 @@ class ProjectTest(DockerClientTestCase):
 
         service_container = project.get_service('web').containers()[0]
 
-        IPAMConfig = (service_container.inspect().get('NetworkSettings', {}).
-                      get('Networks', {}).get('composetest_static_test', {}).
-                      get('IPAMConfig', {}))
-        assert IPAMConfig.get('IPv4Address') == '172.16.100.100'
-        assert IPAMConfig.get('IPv6Address') == 'fe80::1001:102'
+        ipam_config = (service_container.inspect().get('NetworkSettings', {}).
+                       get('Networks', {}).get('composetest_static_test', {}).
+                       get('IPAMConfig', {}))
+        assert ipam_config.get('IPv4Address') == '172.16.100.100'
+        assert ipam_config.get('IPv6Address') == 'fe80::1001:102'
+
+    @v2_3_only()
+    def test_up_with_network_priorities(self):
+        mac_address = '74:6f:75:68:6f:75'
+
+        def get_config_data(p1, p2, p3):
+            return build_config(
+                version=V2_3,
+                services=[{
+                    'name': 'web',
+                    'image': 'busybox:latest',
+                    'networks': {
+                        'n1': {
+                            'priority': p1,
+                        },
+                        'n2': {
+                            'priority': p2,
+                        },
+                        'n3': {
+                            'priority': p3,
+                        }
+                    },
+                    'command': 'top',
+                    'mac_address': mac_address
+                }],
+                networks={
+                    'n1': {},
+                    'n2': {},
+                    'n3': {}
+                }
+            )
+
+        config1 = get_config_data(1000, 1, 1)
+        config2 = get_config_data(2, 3, 1)
+        config3 = get_config_data(5, 40, 100)
+
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config1
+        )
+        project.up(detached=True)
+        service_container = project.get_service('web').containers()[0]
+        net_config = service_container.inspect()['NetworkSettings']['Networks']['composetest_n1']
+        assert net_config['MacAddress'] == mac_address
+
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config2
+        )
+        project.up(detached=True)
+        service_container = project.get_service('web').containers()[0]
+        net_config = service_container.inspect()['NetworkSettings']['Networks']['composetest_n2']
+        assert net_config['MacAddress'] == mac_address
+
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config3
+        )
+        project.up(detached=True)
+        service_container = project.get_service('web').containers()[0]
+        net_config = service_container.inspect()['NetworkSettings']['Networks']['composetest_n3']
+        assert net_config['MacAddress'] == mac_address
 
     @v2_1_only()
     def test_up_with_enable_ipv6(self):
@@ -915,7 +971,7 @@ class ProjectTest(DockerClientTestCase):
             config_data=config_data,
         )
 
-        with self.assertRaises(ProjectError):
+        with pytest.raises(ProjectError):
             project.up()
 
     @v2_1_only()
@@ -1026,9 +1082,70 @@ class ProjectTest(DockerClientTestCase):
             name='composetest',
             config_data=config_data
         )
-        with self.assertRaises(ProjectError):
+        with pytest.raises(ProjectError):
             project.up()
 
+    @v2_3_only()
+    @if_runtime_available('runc')
+    def test_up_with_runtime(self):
+        self.require_api_version('1.30')
+        config_data = build_config(
+            version=V2_3,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'runtime': 'runc'
+            }],
+        )
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data
+        )
+        project.up(detached=True)
+        service_container = project.get_service('web').containers(stopped=True)[0]
+        assert service_container.inspect()['HostConfig']['Runtime'] == 'runc'
+
+    @v2_3_only()
+    def test_up_with_invalid_runtime(self):
+        self.require_api_version('1.30')
+        config_data = build_config(
+            version=V2_3,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'runtime': 'foobar'
+            }],
+        )
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data
+        )
+        with pytest.raises(ProjectError):
+            project.up()
+
+    @v2_3_only()
+    @if_runtime_available('nvidia')
+    def test_up_with_nvidia_runtime(self):
+        self.require_api_version('1.30')
+        config_data = build_config(
+            version=V2_3,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'runtime': 'nvidia'
+            }],
+        )
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data
+        )
+        project.up(detached=True)
+        service_container = project.get_service('web').containers(stopped=True)[0]
+        assert service_container.inspect()['HostConfig']['Runtime'] == 'nvidia'
+
     @v2_only()
     def test_project_up_with_network_internal(self):
         self.require_api_version('1.23')
@@ -1109,11 +1226,11 @@ class ProjectTest(DockerClientTestCase):
             config_data=config_data, client=self.client
         )
         project.up()
-        self.assertEqual(len(project.containers()), 1)
+        assert len(project.containers()) == 1
 
         volume_data = self.get_volume_data(full_vol_name)
         assert volume_data['Name'].split('/')[-1] == full_vol_name
-        self.assertEqual(volume_data['Driver'], 'local')
+        assert volume_data['Driver'] == 'local'
 
     @v2_1_only()
     def test_project_up_with_volume_labels(self):
@@ -1202,12 +1319,12 @@ class ProjectTest(DockerClientTestCase):
         )
         project.up()
         containers = project.containers()
-        self.assertEqual(len(containers), 2)
+        assert len(containers) == 2
 
         another = project.get_service('another').containers()[0]
         log_config = another.get('HostConfig.LogConfig')
-        self.assertTrue(log_config)
-        self.assertEqual(log_config.get('Type'), 'none')
+        assert log_config
+        assert log_config.get('Type') == 'none'
 
     @v2_only()
     def test_project_up_port_mappings_with_multiple_files(self):
@@ -1243,7 +1360,7 @@ class ProjectTest(DockerClientTestCase):
         )
         project.up()
         containers = project.containers()
-        self.assertEqual(len(containers), 1)
+        assert len(containers) == 1
 
     @v2_2_only()
     def test_project_up_config_scale(self):
@@ -1319,7 +1436,7 @@ class ProjectTest(DockerClientTestCase):
 
         volume_data = self.get_volume_data(full_vol_name)
         assert volume_data['Name'].split('/')[-1] == full_vol_name
-        self.assertEqual(volume_data['Driver'], 'local')
+        assert volume_data['Driver'] == 'local'
 
     @v3_only()
     def test_project_up_with_secrets(self):
@@ -1376,7 +1493,7 @@ class ProjectTest(DockerClientTestCase):
             name='composetest',
             config_data=config_data, client=self.client
         )
-        with self.assertRaises(APIError if is_cluster(self.client) else config.ConfigurationError):
+        with pytest.raises(APIError if is_cluster(self.client) else config.ConfigurationError):
             project.volumes.initialize()
 
     @v2_only()
@@ -1402,7 +1519,7 @@ class ProjectTest(DockerClientTestCase):
 
         volume_data = self.get_volume_data(full_vol_name)
         assert volume_data['Name'].split('/')[-1] == full_vol_name
-        self.assertEqual(volume_data['Driver'], 'local')
+        assert volume_data['Driver'] == 'local'
 
         config_data = config_data._replace(
             volumes={vol_name: {'driver': 'smb'}}
@@ -1412,11 +1529,11 @@ class ProjectTest(DockerClientTestCase):
             config_data=config_data,
             client=self.client
         )
-        with self.assertRaises(config.ConfigurationError) as e:
+        with pytest.raises(config.ConfigurationError) as e:
             project.volumes.initialize()
         assert 'Configuration for volume {0} specifies driver smb'.format(
             vol_name
-        ) in str(e.exception)
+        ) in str(e.value)
 
     @v2_only()
     def test_initialize_volumes_updated_blank_driver(self):
@@ -1440,7 +1557,7 @@ class ProjectTest(DockerClientTestCase):
 
         volume_data = self.get_volume_data(full_vol_name)
         assert volume_data['Name'].split('/')[-1] == full_vol_name
-        self.assertEqual(volume_data['Driver'], 'local')
+        assert volume_data['Driver'] == 'local'
 
         config_data = config_data._replace(
             volumes={vol_name: {}}
@@ -1453,7 +1570,7 @@ class ProjectTest(DockerClientTestCase):
         project.volumes.initialize()
         volume_data = self.get_volume_data(full_vol_name)
         assert volume_data['Name'].split('/')[-1] == full_vol_name
-        self.assertEqual(volume_data['Driver'], 'local')
+        assert volume_data['Driver'] == 'local'
 
     @v2_only()
     @no_cluster('inspect volume by name defect on Swarm Classic')
@@ -1479,7 +1596,7 @@ class ProjectTest(DockerClientTestCase):
         )
         project.volumes.initialize()
 
-        with self.assertRaises(NotFound):
+        with pytest.raises(NotFound):
             self.client.inspect_volume(full_vol_name)
 
     @v2_only()
@@ -1501,11 +1618,11 @@ class ProjectTest(DockerClientTestCase):
             name='composetest',
             config_data=config_data, client=self.client
         )
-        with self.assertRaises(config.ConfigurationError) as e:
+        with pytest.raises(config.ConfigurationError) as e:
             project.volumes.initialize()
         assert 'Volume {0} declared as external'.format(
             vol_name
-        ) in str(e.exception)
+        ) in str(e.value)
 
     @v2_only()
     def test_project_up_named_volumes_in_binds(self):
@@ -1534,10 +1651,10 @@ class ProjectTest(DockerClientTestCase):
             name='composetest', config_data=config_data, client=self.client
         )
         service = project.services[0]
-        self.assertEqual(service.name, 'simple')
+        assert service.name == 'simple'
         volumes = service.options.get('volumes')
-        self.assertEqual(len(volumes), 1)
-        self.assertEqual(volumes[0].external, full_vol_name)
+        assert len(volumes) == 1
+        assert volumes[0].external == full_vol_name
         project.up()
         engine_volumes = self.client.volumes()['Volumes']
         container = service.get_container()
@@ -1581,6 +1698,31 @@ class ProjectTest(DockerClientTestCase):
             if ctnr.labels.get(LABEL_SERVICE) == 'service1'
         ]) == 0
 
+    def test_project_up_ignore_orphans(self):
+        config_dict = {
+            'service1': {
+                'image': 'busybox:latest',
+                'command': 'top',
+            }
+        }
+
+        config_data = load_config(config_dict)
+        project = Project.from_config(
+            name='composetest', config_data=config_data, client=self.client
+        )
+        project.up()
+        config_dict['service2'] = config_dict['service1']
+        del config_dict['service1']
+
+        config_data = load_config(config_dict)
+        project = Project.from_config(
+            name='composetest', config_data=config_data, client=self.client
+        )
+        with mock.patch('compose.project.log') as mock_log:
+            project.up(ignore_orphans=True)
+
+        mock_log.warning.assert_not_called()
+
     @v2_1_only()
     def test_project_up_healthy_dependency(self):
         config_dict = {

+ 7 - 5
tests/integration/resilience_test.py

@@ -1,6 +1,8 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
+import pytest
+
 from .. import mock
 from .testcases import DockerClientTestCase
 from compose.config.types import VolumeSpec
@@ -28,25 +30,25 @@ class ResilienceTest(DockerClientTestCase):
     def test_successful_recreate(self):
         self.project.up(strategy=ConvergenceStrategy.always)
         container = self.db.containers()[0]
-        self.assertEqual(container.get_mount('/var/db')['Source'], self.host_path)
+        assert container.get_mount('/var/db')['Source'] == self.host_path
 
     def test_create_failure(self):
         with mock.patch('compose.service.Service.create_container', crash):
-            with self.assertRaises(Crash):
+            with pytest.raises(Crash):
                 self.project.up(strategy=ConvergenceStrategy.always)
 
         self.project.up()
         container = self.db.containers()[0]
-        self.assertEqual(container.get_mount('/var/db')['Source'], self.host_path)
+        assert container.get_mount('/var/db')['Source'] == self.host_path
 
     def test_start_failure(self):
         with mock.patch('compose.service.Service.start_container', crash):
-            with self.assertRaises(Crash):
+            with pytest.raises(Crash):
                 self.project.up(strategy=ConvergenceStrategy.always)
 
         self.project.up()
         container = self.db.containers()[0]
-        self.assertEqual(container.get_mount('/var/db')['Source'], self.host_path)
+        assert container.get_mount('/var/db')['Source'] == self.host_path
 
 
 class Crash(Exception):

Datei-Diff unterdrückt, da er zu groß ist
+ 297 - 185
tests/integration/service_test.py


+ 52 - 49
tests/integration/state_test.py

@@ -46,12 +46,12 @@ class BasicProjectTest(ProjectTestCase):
 
     def test_no_change(self):
         old_containers = self.run_up(self.cfg)
-        self.assertEqual(len(old_containers), 2)
+        assert len(old_containers) == 2
 
         new_containers = self.run_up(self.cfg)
-        self.assertEqual(len(new_containers), 2)
+        assert len(new_containers) == 2
 
-        self.assertEqual(old_containers, new_containers)
+        assert old_containers == new_containers
 
     def test_partial_change(self):
         old_containers = self.run_up(self.cfg)
@@ -61,34 +61,34 @@ class BasicProjectTest(ProjectTestCase):
         self.cfg['web']['command'] = '/bin/true'
 
         new_containers = self.run_up(self.cfg)
-        self.assertEqual(len(new_containers), 2)
+        assert len(new_containers) == 2
 
         preserved = list(old_containers & new_containers)
-        self.assertEqual(preserved, [old_db])
+        assert preserved == [old_db]
 
         removed = list(old_containers - new_containers)
-        self.assertEqual(removed, [old_web])
+        assert removed == [old_web]
 
         created = list(new_containers - old_containers)
-        self.assertEqual(len(created), 1)
-        self.assertEqual(created[0].name_without_project, 'web_1')
-        self.assertEqual(created[0].get('Config.Cmd'), ['/bin/true'])
+        assert len(created) == 1
+        assert created[0].name_without_project == 'web_1'
+        assert created[0].get('Config.Cmd') == ['/bin/true']
 
     def test_all_change(self):
         old_containers = self.run_up(self.cfg)
-        self.assertEqual(len(old_containers), 2)
+        assert len(old_containers) == 2
 
         self.cfg['web']['command'] = '/bin/true'
         self.cfg['db']['command'] = '/bin/true'
 
         new_containers = self.run_up(self.cfg)
-        self.assertEqual(len(new_containers), 2)
+        assert len(new_containers) == 2
 
         unchanged = old_containers & new_containers
-        self.assertEqual(len(unchanged), 0)
+        assert len(unchanged) == 0
 
         new = new_containers - old_containers
-        self.assertEqual(len(new), 2)
+        assert len(new) == 2
 
 
 class ProjectWithDependenciesTest(ProjectTestCase):
@@ -114,10 +114,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
 
     def test_up(self):
         containers = self.run_up(self.cfg)
-        self.assertEqual(
-            set(c.name_without_project for c in containers),
-            set(['db_1', 'web_1', 'nginx_1']),
-        )
+        assert set(c.name_without_project for c in containers) == set(['db_1', 'web_1', 'nginx_1'])
 
     def test_change_leaf(self):
         old_containers = self.run_up(self.cfg)
@@ -125,10 +122,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['nginx']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg)
 
-        self.assertEqual(
-            set(c.name_without_project for c in new_containers - old_containers),
-            set(['nginx_1']),
-        )
+        assert set(c.name_without_project for c in new_containers - old_containers) == set(['nginx_1'])
 
     def test_change_middle(self):
         old_containers = self.run_up(self.cfg)
@@ -136,10 +130,16 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['web']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg)
 
-        self.assertEqual(
-            set(c.name_without_project for c in new_containers - old_containers),
-            set(['web_1', 'nginx_1']),
-        )
+        assert set(c.name_without_project for c in new_containers - old_containers) == set(['web_1'])
+
+    def test_change_middle_always_recreate_deps(self):
+        old_containers = self.run_up(self.cfg, always_recreate_deps=True)
+
+        self.cfg['web']['environment'] = {'NEW_VAR': '1'}
+        new_containers = self.run_up(self.cfg, always_recreate_deps=True)
+
+        assert set(c.name_without_project
+                   for c in new_containers - old_containers) == {'web_1', 'nginx_1'}
 
     def test_change_root(self):
         old_containers = self.run_up(self.cfg)
@@ -147,10 +147,16 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         self.cfg['db']['environment'] = {'NEW_VAR': '1'}
         new_containers = self.run_up(self.cfg)
 
-        self.assertEqual(
-            set(c.name_without_project for c in new_containers - old_containers),
-            set(['db_1', 'web_1', 'nginx_1']),
-        )
+        assert set(c.name_without_project for c in new_containers - old_containers) == set(['db_1'])
+
+    def test_change_root_always_recreate_deps(self):
+        old_containers = self.run_up(self.cfg, always_recreate_deps=True)
+
+        self.cfg['db']['environment'] = {'NEW_VAR': '1'}
+        new_containers = self.run_up(self.cfg, always_recreate_deps=True)
+
+        assert set(c.name_without_project
+                   for c in new_containers - old_containers) == {'db_1', 'web_1', 'nginx_1'}
 
     def test_change_root_no_recreate(self):
         old_containers = self.run_up(self.cfg)
@@ -160,7 +166,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
             self.cfg,
             strategy=ConvergenceStrategy.never)
 
-        self.assertEqual(new_containers - old_containers, set())
+        assert new_containers - old_containers == set()
 
     def test_service_removed_while_down(self):
         next_cfg = {
@@ -172,26 +178,26 @@ class ProjectWithDependenciesTest(ProjectTestCase):
         }
 
         containers = self.run_up(self.cfg)
-        self.assertEqual(len(containers), 3)
+        assert len(containers) == 3
 
         project = self.make_project(self.cfg)
         project.stop(timeout=1)
 
         containers = self.run_up(next_cfg)
-        self.assertEqual(len(containers), 2)
+        assert len(containers) == 2
 
     def test_service_recreated_when_dependency_created(self):
         containers = self.run_up(self.cfg, service_names=['web'], start_deps=False)
-        self.assertEqual(len(containers), 1)
+        assert len(containers) == 1
 
         containers = self.run_up(self.cfg)
-        self.assertEqual(len(containers), 3)
+        assert len(containers) == 3
 
         web, = [c for c in containers if c.service == 'web']
         nginx, = [c for c in containers if c.service == 'nginx']
 
-        self.assertEqual(set(get_links(web)), {'composetest_db_1', 'db', 'db_1'})
-        self.assertEqual(set(get_links(nginx)), {'composetest_web_1', 'web', 'web_1'})
+        assert set(get_links(web)) == {'composetest_db_1', 'db', 'db_1'}
+        assert set(get_links(nginx)) == {'composetest_web_1', 'web', 'web_1'}
 
 
 class ServiceStateTest(DockerClientTestCase):
@@ -199,7 +205,7 @@ class ServiceStateTest(DockerClientTestCase):
 
     def test_trigger_create(self):
         web = self.create_service('web')
-        self.assertEqual(('create', []), web.convergence_plan())
+        assert ('create', []) == web.convergence_plan()
 
     def test_trigger_noop(self):
         web = self.create_service('web')
@@ -207,7 +213,7 @@ class ServiceStateTest(DockerClientTestCase):
         web.start()
 
         web = self.create_service('web')
-        self.assertEqual(('noop', [container]), web.convergence_plan())
+        assert ('noop', [container]) == web.convergence_plan()
 
     def test_trigger_start(self):
         options = dict(command=["top"])
@@ -219,26 +225,23 @@ class ServiceStateTest(DockerClientTestCase):
         containers[0].stop()
         containers[0].inspect()
 
-        self.assertEqual([c.is_running for c in containers], [False, True])
+        assert [c.is_running for c in containers] == [False, True]
 
-        self.assertEqual(
-            ('start', containers[0:1]),
-            web.convergence_plan(),
-        )
+        assert ('start', containers[0:1]) == web.convergence_plan()
 
     def test_trigger_recreate_with_config_change(self):
         web = self.create_service('web', command=["top"])
         container = web.create_container()
 
         web = self.create_service('web', command=["top", "-d", "1"])
-        self.assertEqual(('recreate', [container]), web.convergence_plan())
+        assert ('recreate', [container]) == web.convergence_plan()
 
     def test_trigger_recreate_with_nonexistent_image_tag(self):
         web = self.create_service('web', image="busybox:latest")
         container = web.create_container()
 
         web = self.create_service('web', image="nonexistent-image")
-        self.assertEqual(('recreate', [container]), web.convergence_plan())
+        assert ('recreate', [container]) == web.convergence_plan()
 
     def test_trigger_recreate_with_image_change(self):
         repo = 'composetest_myimage'
@@ -270,7 +273,7 @@ class ServiceStateTest(DockerClientTestCase):
         self.client.remove_container(c)
 
         web = self.create_service('web', image=image)
-        self.assertEqual(('recreate', [container]), web.convergence_plan())
+        assert ('recreate', [container]) == web.convergence_plan()
 
     @no_cluster('Can not guarantee the build will be run on the same node the service is deployed')
     def test_trigger_recreate_with_build(self):
@@ -288,7 +291,7 @@ class ServiceStateTest(DockerClientTestCase):
         web.build()
 
         web = self.create_service('web', build={'context': str(context)})
-        self.assertEqual(('recreate', [container]), web.convergence_plan())
+        assert ('recreate', [container]) == web.convergence_plan()
 
     def test_image_changed_to_build(self):
         context = py.test.ensuretemp('test_image_changed_to_build')
@@ -303,6 +306,6 @@ class ServiceStateTest(DockerClientTestCase):
 
         web = self.create_service('web', build={'context': str(context)})
         plan = web.convergence_plan()
-        self.assertEqual(('recreate', [container]), plan)
+        assert ('recreate', [container]) == plan
         containers = web.execute_convergence_plan(plan)
-        self.assertEqual(len(containers), 1)
+        assert len(containers) == 1

+ 12 - 0
tests/integration/testcases.py

@@ -155,6 +155,18 @@ class DockerClientTestCase(unittest.TestCase):
         return self.client.inspect_volume(volumes[0]['Name'])
 
 
+def if_runtime_available(runtime):
+    def decorator(f):
+        @functools.wraps(f)
+        def wrapper(self, *args, **kwargs):
+            if runtime not in self.client.info().get('Runtimes', {}):
+                return pytest.skip("This daemon does not support the '{}'' runtime".format(runtime))
+            return f(self, *args, **kwargs)
+        return wrapper
+
+    return decorator
+
+
 def is_cluster(client):
     if SWARM_ASSUME_MULTINODE:
         return True

+ 1 - 1
tests/unit/bundle_test.py

@@ -2,9 +2,9 @@ from __future__ import absolute_import
 from __future__ import unicode_literals
 
 import docker
-import mock
 import pytest
 
+from .. import mock
 from compose import bundle
 from compose import service
 from compose.cli.errors import UserError

+ 24 - 4
tests/unit/cli/docker_client_test.py

@@ -60,13 +60,13 @@ class DockerClientTestCase(unittest.TestCase):
             platform.system(),
             platform.release()
         )
-        self.assertEqual(client.headers['User-Agent'], expected)
+        assert client.headers['User-Agent'] == expected
 
 
 class TLSConfigTestCase(unittest.TestCase):
-    ca_cert = 'tests/fixtures/tls/ca.pem'
-    client_cert = 'tests/fixtures/tls/cert.pem'
-    key = 'tests/fixtures/tls/key.key'
+    ca_cert = os.path.join('tests/fixtures/tls/', 'ca.pem')
+    client_cert = os.path.join('tests/fixtures/tls/', 'cert.pem')
+    key = os.path.join('tests/fixtures/tls/', 'key.pem')
 
     def test_simple_tls(self):
         options = {'--tls': True}
@@ -168,6 +168,26 @@ class TLSConfigTestCase(unittest.TestCase):
         assert isinstance(result, docker.tls.TLSConfig)
         assert result.ssl_version == ssl.PROTOCOL_TLSv1
 
+    def test_tls_mixed_environment_and_flags(self):
+        options = {'--tls': True, '--tlsverify': False}
+        environment = {'DOCKER_CERT_PATH': 'tests/fixtures/tls/'}
+        result = tls_config_from_options(options, environment)
+        assert isinstance(result, docker.tls.TLSConfig)
+        assert result.cert == (self.client_cert, self.key)
+        assert result.ca_cert == self.ca_cert
+        assert result.verify is False
+
+    def test_tls_flags_override_environment(self):
+        environment = {'DOCKER_TLS_VERIFY': True}
+        options = {'--tls': True, '--tlsverify': False}
+        assert tls_config_from_options(options, environment) is True
+
+        environment['COMPOSE_TLS_VERSION'] = 'TLSv1'
+        result = tls_config_from_options(options, environment)
+        assert isinstance(result, docker.tls.TLSConfig)
+        assert result.ssl_version == ssl.PROTOCOL_TLSv1
+        assert result.verify is False
+
 
 class TestGetTlsVersion(object):
     def test_get_tls_version_default(self):

+ 0 - 1
tests/unit/cli/formatter_test.py

@@ -37,7 +37,6 @@ class ConsoleWarningFormatterTestCase(unittest.TestCase):
     def test_format_unicode_info(self):
         message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
         output = self.formatter.format(make_log_record(logging.INFO, message))
-        print(output)
         assert output == message.decode('utf-8')
 
     def test_format_unicode_warn(self):

+ 10 - 0
tests/unit/cli/main_test.py

@@ -3,6 +3,7 @@ from __future__ import unicode_literals
 
 import logging
 
+import docker
 import pytest
 
 from compose import container
@@ -11,6 +12,7 @@ from compose.cli.formatter import ConsoleWarningFormatter
 from compose.cli.main import convergence_strategy_from_opts
 from compose.cli.main import filter_containers_to_service_names
 from compose.cli.main import setup_console_handler
+from compose.cli.main import warn_for_swarm_mode
 from compose.service import ConvergenceStrategy
 from tests import mock
 
@@ -54,6 +56,14 @@ class TestCLIMainTestCase(object):
         actual = filter_containers_to_service_names(containers, service_names)
         assert actual == containers
 
+    def test_warning_in_swarm_mode(self):
+        mock_client = mock.create_autospec(docker.APIClient)
+        mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
+
+        with mock.patch('compose.cli.main.log') as fake_log:
+            warn_for_swarm_mode(mock_client)
+            assert fake_log.warn.call_count == 1
+
 
 class TestSetupConsoleHandlerTestCase(object):
 

+ 4 - 4
tests/unit/cli/verbose_proxy_test.py

@@ -16,18 +16,18 @@ class VerboseProxyTestCase(unittest.TestCase):
             ("arg1", True),
             {'key': 'value'})
 
-        self.assertEqual(expected, actual)
+        assert expected == actual
 
     def test_format_return_sequence(self):
         expected = "(list with 10 items)"
         actual = verbose_proxy.format_return(list(range(10)), 2)
-        self.assertEqual(expected, actual)
+        assert expected == actual
 
     def test_format_return(self):
         expected = repr({'Id': 'ok'})
         actual = verbose_proxy.format_return({'Id': 'ok'}, 2)
-        self.assertEqual(expected, actual)
+        assert expected == actual
 
     def test_format_return_no_result(self):
         actual = verbose_proxy.format_return(None, 2)
-        self.assertEqual(None, actual)
+        assert actual is None

+ 14 - 17
tests/unit/cli_test.py

@@ -30,36 +30,36 @@ class CLITestCase(unittest.TestCase):
         test_dir = py._path.local.LocalPath('tests/fixtures/simple-composefile')
         with test_dir.as_cwd():
             project_name = get_project_name('.')
-            self.assertEqual('simplecomposefile', project_name)
+            assert 'simplecomposefile' == project_name
 
     def test_project_name_with_explicit_base_dir(self):
         base_dir = 'tests/fixtures/simple-composefile'
         project_name = get_project_name(base_dir)
-        self.assertEqual('simplecomposefile', project_name)
+        assert 'simplecomposefile' == project_name
 
     def test_project_name_with_explicit_uppercase_base_dir(self):
         base_dir = 'tests/fixtures/UpperCaseDir'
         project_name = get_project_name(base_dir)
-        self.assertEqual('uppercasedir', project_name)
+        assert 'uppercasedir' == project_name
 
     def test_project_name_with_explicit_project_name(self):
         name = 'explicit-project-name'
         project_name = get_project_name(None, project_name=name)
-        self.assertEqual('explicitprojectname', project_name)
+        assert 'explicitprojectname' == project_name
 
     @mock.patch.dict(os.environ)
     def test_project_name_from_environment_new_var(self):
         name = 'namefromenv'
         os.environ['COMPOSE_PROJECT_NAME'] = name
         project_name = get_project_name(None)
-        self.assertEqual(project_name, name)
+        assert project_name == name
 
     def test_project_name_with_empty_environment_var(self):
         base_dir = 'tests/fixtures/simple-composefile'
         with mock.patch.dict(os.environ):
             os.environ['COMPOSE_PROJECT_NAME'] = ''
             project_name = get_project_name(base_dir)
-        self.assertEqual('simplecomposefile', project_name)
+        assert 'simplecomposefile' == project_name
 
     @mock.patch.dict(os.environ)
     def test_project_name_with_environment_file(self):
@@ -80,9 +80,9 @@ class CLITestCase(unittest.TestCase):
     def test_get_project(self):
         base_dir = 'tests/fixtures/longer-filename-composefile'
         project = get_project(base_dir)
-        self.assertEqual(project.name, 'longerfilenamecomposefile')
-        self.assertTrue(project.client)
-        self.assertTrue(project.services)
+        assert project.name == 'longerfilenamecomposefile'
+        assert project.client
+        assert project.services
 
     def test_command_help(self):
         with mock.patch('sys.stdout', new=StringIO()) as fake_stdout:
@@ -97,7 +97,9 @@ class CLITestCase(unittest.TestCase):
     @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason="requires dockerpty")
     @mock.patch('compose.cli.main.RunOperation', autospec=True)
     @mock.patch('compose.cli.main.PseudoTerminal', autospec=True)
+    @mock.patch.dict(os.environ)
     def test_run_interactive_passes_logs_false(self, mock_pseudo_terminal, mock_run_operation):
+        os.environ['COMPOSE_INTERACTIVE_NO_CLI'] = 'true'
         mock_client = mock.create_autospec(docker.APIClient)
         mock_client.api_version = DEFAULT_DOCKER_API_VERSION
         project = Project.from_config(
@@ -165,10 +167,7 @@ class CLITestCase(unittest.TestCase):
             '--workdir': None,
         })
 
-        self.assertEqual(
-            mock_client.create_host_config.call_args[1]['restart_policy']['Name'],
-            'always'
-        )
+        assert mock_client.create_host_config.call_args[1]['restart_policy']['Name'] == 'always'
 
         command = TopLevelCommand(project)
         command.run({
@@ -189,9 +188,7 @@ class CLITestCase(unittest.TestCase):
             '--workdir': None,
         })
 
-        self.assertFalse(
-            mock_client.create_host_config.call_args[1].get('restart_policy')
-        )
+        assert not mock_client.create_host_config.call_args[1].get('restart_policy')
 
     def test_command_manual_and_service_ports_together(self):
         project = Project.from_config(
@@ -203,7 +200,7 @@ class CLITestCase(unittest.TestCase):
         )
         command = TopLevelCommand(project)
 
-        with self.assertRaises(UserError):
+        with pytest.raises(UserError):
             command.run({
                 'SERVICE': 'service',
                 'COMMAND': None,

File diff suppressed because it is too large
+ 308 - 196
tests/unit/config/config_test.py


+ 77 - 2
tests/unit/config/interpolation_test.py

@@ -1,13 +1,16 @@
+# encoding: utf-8
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
 import pytest
 
 from compose.config.environment import Environment
+from compose.config.errors import ConfigurationError
 from compose.config.interpolation import interpolate_environment_variables
 from compose.config.interpolation import Interpolator
 from compose.config.interpolation import InvalidInterpolation
 from compose.config.interpolation import TemplateWithDefaults
+from compose.config.interpolation import UnsetRequiredSubstitution
 from compose.const import COMPOSEFILE_V2_0 as V2_0
 from compose.const import COMPOSEFILE_V2_3 as V2_3
 from compose.const import COMPOSEFILE_V3_4 as V3_4
@@ -106,7 +109,7 @@ def test_interpolate_environment_variables_in_secrets(mock_env):
         'secretservice': {
             'file': 'bar',
             'labels': {
-                'max': 2,
+                'max': '2',
                 'user': 'jenny'
             }
         },
@@ -254,6 +257,30 @@ def test_interpolate_environment_services_convert_types_v3(mock_env):
     assert value == expected
 
 
+def test_interpolate_environment_services_convert_types_invalid(mock_env):
+    entry = {'service1': {'privileged': '${POSINT}'}}
+
+    with pytest.raises(ConfigurationError) as exc:
+        interpolate_environment_variables(V2_3, entry, 'service', mock_env)
+
+    assert 'Error while attempting to convert service.service1.privileged to '\
+        'appropriate type: "50" is not a valid boolean value' in exc.exconly()
+
+    entry = {'service1': {'cpus': '${TRUE}'}}
+    with pytest.raises(ConfigurationError) as exc:
+        interpolate_environment_variables(V2_3, entry, 'service', mock_env)
+
+    assert 'Error while attempting to convert service.service1.cpus to '\
+        'appropriate type: "True" is not a valid float' in exc.exconly()
+
+    entry = {'service1': {'ulimits': {'nproc': '${FLOAT}'}}}
+    with pytest.raises(ConfigurationError) as exc:
+        interpolate_environment_variables(V2_3, entry, 'service', mock_env)
+
+    assert 'Error while attempting to convert service.service1.ulimits.nproc to '\
+        'appropriate type: "0.145" is not a valid integer' in exc.exconly()
+
+
 def test_interpolate_environment_network_convert_types(mock_env):
     entry = {
         'network1': {
@@ -332,9 +359,57 @@ def test_interpolate_with_value(defaults_interpolator):
 def test_interpolate_missing_with_default(defaults_interpolator):
     assert defaults_interpolator("ok ${missing:-def}") == "ok def"
     assert defaults_interpolator("ok ${missing-def}") == "ok def"
-    assert defaults_interpolator("ok ${BAR:-/non:-alphanumeric}") == "ok /non:-alphanumeric"
 
 
 def test_interpolate_with_empty_and_default_value(defaults_interpolator):
     assert defaults_interpolator("ok ${BAR:-def}") == "ok def"
     assert defaults_interpolator("ok ${BAR-def}") == "ok "
+
+
+def test_interpolate_mandatory_values(defaults_interpolator):
+    assert defaults_interpolator("ok ${FOO:?bar}") == "ok first"
+    assert defaults_interpolator("ok ${FOO?bar}") == "ok first"
+    assert defaults_interpolator("ok ${BAR?bar}") == "ok "
+
+    with pytest.raises(UnsetRequiredSubstitution) as e:
+        defaults_interpolator("not ok ${BAR:?high bar}")
+    assert e.value.err == 'high bar'
+
+    with pytest.raises(UnsetRequiredSubstitution) as e:
+        defaults_interpolator("not ok ${BAZ?dropped the bazz}")
+    assert e.value.err == 'dropped the bazz'
+
+
+def test_interpolate_mandatory_no_err_msg(defaults_interpolator):
+    with pytest.raises(UnsetRequiredSubstitution) as e:
+        defaults_interpolator("not ok ${BAZ?}")
+
+    assert e.value.err == ''
+
+
+def test_interpolate_mixed_separators(defaults_interpolator):
+    assert defaults_interpolator("ok ${BAR:-/non:-alphanumeric}") == "ok /non:-alphanumeric"
+    assert defaults_interpolator("ok ${BAR:-:?wwegegr??:?}") == "ok :?wwegegr??:?"
+    assert defaults_interpolator("ok ${BAR-:-hello}") == 'ok '
+
+    with pytest.raises(UnsetRequiredSubstitution) as e:
+        defaults_interpolator("not ok ${BAR:?xazz:-redf}")
+    assert e.value.err == 'xazz:-redf'
+
+    assert defaults_interpolator("ok ${BAR?...:?bar}") == "ok "
+
+
+def test_unbraced_separators(defaults_interpolator):
+    assert defaults_interpolator("ok $FOO:-bar") == "ok first:-bar"
+    assert defaults_interpolator("ok $BAZ?error") == "ok ?error"
+
+
+def test_interpolate_unicode_values():
+    variable_mapping = {
+        'FOO': '十六夜 咲夜'.encode('utf-8'),
+        'BAR': '十六夜 咲夜'
+    }
+    interpol = Interpolator(TemplateWithDefaults, variable_mapping).interpolate
+
+    assert interpol("$FOO") == '十六夜 咲夜'
+    assert interpol("${BAR}") == '十六夜 咲夜'

+ 33 - 41
tests/unit/container_test.py

@@ -39,13 +39,11 @@ class ContainerTest(unittest.TestCase):
         container = Container.from_ps(None,
                                       self.container_dict,
                                       has_been_inspected=True)
-        self.assertEqual(
-            container.dictionary,
-            {
-                "Id": self.container_id,
-                "Image": "busybox:latest",
-                "Name": "/composetest_db_1",
-            })
+        assert container.dictionary == {
+            "Id": self.container_id,
+            "Image": "busybox:latest",
+            "Name": "/composetest_db_1",
+        }
 
     def test_from_ps_prefixed(self):
         self.container_dict['Names'] = [
@@ -56,11 +54,11 @@ class ContainerTest(unittest.TestCase):
             None,
             self.container_dict,
             has_been_inspected=True)
-        self.assertEqual(container.dictionary, {
+        assert container.dictionary == {
             "Id": self.container_id,
             "Image": "busybox:latest",
             "Name": "/composetest_db_1",
-        })
+        }
 
     def test_environment(self):
         container = Container(None, {
@@ -72,30 +70,30 @@ class ContainerTest(unittest.TestCase):
                 ]
             }
         }, has_been_inspected=True)
-        self.assertEqual(container.environment, {
+        assert container.environment == {
             'FOO': 'BAR',
             'BAZ': 'DOGE',
-        })
+        }
 
     def test_number(self):
         container = Container(None, self.container_dict, has_been_inspected=True)
-        self.assertEqual(container.number, 7)
+        assert container.number == 7
 
     def test_name(self):
         container = Container.from_ps(None,
                                       self.container_dict,
                                       has_been_inspected=True)
-        self.assertEqual(container.name, "composetest_db_1")
+        assert container.name == "composetest_db_1"
 
     def test_name_without_project(self):
         self.container_dict['Name'] = "/composetest_web_7"
         container = Container(None, self.container_dict, has_been_inspected=True)
-        self.assertEqual(container.name_without_project, "web_7")
+        assert container.name_without_project == "web_7"
 
     def test_name_without_project_custom_container_name(self):
         self.container_dict['Name'] = "/custom_name_of_container"
         container = Container(None, self.container_dict, has_been_inspected=True)
-        self.assertEqual(container.name_without_project, "custom_name_of_container")
+        assert container.name_without_project == "custom_name_of_container"
 
     def test_inspect_if_not_inspected(self):
         mock_client = mock.create_autospec(docker.APIClient)
@@ -103,16 +101,15 @@ class ContainerTest(unittest.TestCase):
 
         container.inspect_if_not_inspected()
         mock_client.inspect_container.assert_called_once_with("the_id")
-        self.assertEqual(container.dictionary,
-                         mock_client.inspect_container.return_value)
-        self.assertTrue(container.has_been_inspected)
+        assert container.dictionary == mock_client.inspect_container.return_value
+        assert container.has_been_inspected
 
         container.inspect_if_not_inspected()
-        self.assertEqual(mock_client.inspect_container.call_count, 1)
+        assert mock_client.inspect_container.call_count == 1
 
     def test_human_readable_ports_none(self):
         container = Container(None, self.container_dict, has_been_inspected=True)
-        self.assertEqual(container.human_readable_ports, '')
+        assert container.human_readable_ports == ''
 
     def test_human_readable_ports_public_and_private(self):
         self.container_dict['NetworkSettings']['Ports'].update({
@@ -122,7 +119,7 @@ class ContainerTest(unittest.TestCase):
         container = Container(None, self.container_dict, has_been_inspected=True)
 
         expected = "45453/tcp, 0.0.0.0:49197->45454/tcp"
-        self.assertEqual(container.human_readable_ports, expected)
+        assert container.human_readable_ports == expected
 
     def test_get_local_port(self):
         self.container_dict['NetworkSettings']['Ports'].update({
@@ -130,9 +127,7 @@ class ContainerTest(unittest.TestCase):
         })
         container = Container(None, self.container_dict, has_been_inspected=True)
 
-        self.assertEqual(
-            container.get_local_port(45454, protocol='tcp'),
-            '0.0.0.0:49197')
+        assert container.get_local_port(45454, protocol='tcp') == '0.0.0.0:49197'
 
     def test_get(self):
         container = Container(None, {
@@ -142,9 +137,9 @@ class ContainerTest(unittest.TestCase):
             },
         }, has_been_inspected=True)
 
-        self.assertEqual(container.get('Status'), "Up 8 seconds")
-        self.assertEqual(container.get('HostConfig.VolumesFrom'), ["volume_id"])
-        self.assertEqual(container.get('Foo.Bar.DoesNotExist'), None)
+        assert container.get('Status') == "Up 8 seconds"
+        assert container.get('HostConfig.VolumesFrom') == ["volume_id"]
+        assert container.get('Foo.Bar.DoesNotExist') is None
 
     def test_short_id(self):
         container = Container(None, self.container_dict, has_been_inspected=True)
@@ -182,17 +177,14 @@ class ContainerTest(unittest.TestCase):
 class GetContainerNameTestCase(unittest.TestCase):
 
     def test_get_container_name(self):
-        self.assertIsNone(get_container_name({}))
-        self.assertEqual(get_container_name({'Name': 'myproject_db_1'}), 'myproject_db_1')
-        self.assertEqual(
-            get_container_name({'Names': ['/myproject_db_1', '/myproject_web_1/db']}),
-            'myproject_db_1')
-        self.assertEqual(
-            get_container_name({
-                'Names': [
-                    '/swarm-host-1/myproject_db_1',
-                    '/swarm-host-1/myproject_web_1/db'
-                ]
-            }),
-            'myproject_db_1'
-        )
+        assert get_container_name({}) is None
+        assert get_container_name({'Name': 'myproject_db_1'}) == 'myproject_db_1'
+        assert get_container_name(
+            {'Names': ['/myproject_db_1', '/myproject_web_1/db']}
+        ) == 'myproject_db_1'
+        assert get_container_name({
+            'Names': [
+                '/swarm-host-1/myproject_db_1',
+                '/swarm-host-1/myproject_web_1/db'
+            ]
+        }) == 'myproject_db_1'

+ 109 - 85
tests/unit/parallel_test.py

@@ -1,11 +1,13 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
+import unittest
 from threading import Lock
 
 import six
 from docker.errors import APIError
 
+from compose.parallel import GlobalLimit
 from compose.parallel import parallel_execute
 from compose.parallel import parallel_execute_iter
 from compose.parallel import ParallelStreamWriter
@@ -31,91 +33,113 @@ def get_deps(obj):
     return [(dep, None) for dep in deps[obj]]
 
 
-def test_parallel_execute():
-    results, errors = parallel_execute(
-        objects=[1, 2, 3, 4, 5],
-        func=lambda x: x * 2,
-        get_name=six.text_type,
-        msg="Doubling",
-    )
-
-    assert sorted(results) == [2, 4, 6, 8, 10]
-    assert errors == {}
-
-
-def test_parallel_execute_with_limit():
-    limit = 1
-    tasks = 20
-    lock = Lock()
-
-    def f(obj):
-        locked = lock.acquire(False)
-        # we should always get the lock because we're the only thread running
-        assert locked
-        lock.release()
-        return None
-
-    results, errors = parallel_execute(
-        objects=list(range(tasks)),
-        func=f,
-        get_name=six.text_type,
-        msg="Testing",
-        limit=limit,
-    )
-
-    assert results == tasks * [None]
-    assert errors == {}
-
-
-def test_parallel_execute_with_deps():
-    log = []
-
-    def process(x):
-        log.append(x)
-
-    parallel_execute(
-        objects=objects,
-        func=process,
-        get_name=lambda obj: obj,
-        msg="Processing",
-        get_deps=get_deps,
-    )
-
-    assert sorted(log) == sorted(objects)
-
-    assert log.index(data_volume) < log.index(db)
-    assert log.index(db) < log.index(web)
-    assert log.index(cache) < log.index(web)
-
-
-def test_parallel_execute_with_upstream_errors():
-    log = []
-
-    def process(x):
-        if x is data_volume:
-            raise APIError(None, None, "Something went wrong")
-        log.append(x)
-
-    parallel_execute(
-        objects=objects,
-        func=process,
-        get_name=lambda obj: obj,
-        msg="Processing",
-        get_deps=get_deps,
-    )
-
-    assert log == [cache]
-
-    events = [
-        (obj, result, type(exception))
-        for obj, result, exception
-        in parallel_execute_iter(objects, process, get_deps, None)
-    ]
-
-    assert (cache, None, type(None)) in events
-    assert (data_volume, None, APIError) in events
-    assert (db, None, UpstreamError) in events
-    assert (web, None, UpstreamError) in events
+class ParallelTest(unittest.TestCase):
+
+    def test_parallel_execute(self):
+        results, errors = parallel_execute(
+            objects=[1, 2, 3, 4, 5],
+            func=lambda x: x * 2,
+            get_name=six.text_type,
+            msg="Doubling",
+        )
+
+        assert sorted(results) == [2, 4, 6, 8, 10]
+        assert errors == {}
+
+    def test_parallel_execute_with_limit(self):
+        limit = 1
+        tasks = 20
+        lock = Lock()
+
+        def f(obj):
+            locked = lock.acquire(False)
+            # we should always get the lock because we're the only thread running
+            assert locked
+            lock.release()
+            return None
+
+        results, errors = parallel_execute(
+            objects=list(range(tasks)),
+            func=f,
+            get_name=six.text_type,
+            msg="Testing",
+            limit=limit,
+        )
+
+        assert results == tasks * [None]
+        assert errors == {}
+
+    def test_parallel_execute_with_global_limit(self):
+        GlobalLimit.set_global_limit(1)
+        self.addCleanup(GlobalLimit.set_global_limit, None)
+        tasks = 20
+        lock = Lock()
+
+        def f(obj):
+            locked = lock.acquire(False)
+            # we should always get the lock because we're the only thread running
+            assert locked
+            lock.release()
+            return None
+
+        results, errors = parallel_execute(
+            objects=list(range(tasks)),
+            func=f,
+            get_name=six.text_type,
+            msg="Testing",
+        )
+
+        assert results == tasks * [None]
+        assert errors == {}
+
+    def test_parallel_execute_with_deps(self):
+        log = []
+
+        def process(x):
+            log.append(x)
+
+        parallel_execute(
+            objects=objects,
+            func=process,
+            get_name=lambda obj: obj,
+            msg="Processing",
+            get_deps=get_deps,
+        )
+
+        assert sorted(log) == sorted(objects)
+
+        assert log.index(data_volume) < log.index(db)
+        assert log.index(db) < log.index(web)
+        assert log.index(cache) < log.index(web)
+
+    def test_parallel_execute_with_upstream_errors(self):
+        log = []
+
+        def process(x):
+            if x is data_volume:
+                raise APIError(None, None, "Something went wrong")
+            log.append(x)
+
+        parallel_execute(
+            objects=objects,
+            func=process,
+            get_name=lambda obj: obj,
+            msg="Processing",
+            get_deps=get_deps,
+        )
+
+        assert log == [cache]
+
+        events = [
+            (obj, result, type(exception))
+            for obj, result, exception
+            in parallel_execute_iter(objects, process, get_deps, None)
+        ]
+
+        assert (cache, None, type(None)) in events
+        assert (data_volume, None, APIError) in events
+        assert (db, None, UpstreamError) in events
+        assert (web, None, UpstreamError) in events
 
 
 def test_parallel_execute_alignment(capsys):

+ 6 - 6
tests/unit/progress_stream_test.py

@@ -15,7 +15,7 @@ class ProgressStreamTestCase(unittest.TestCase):
             b'"progress": "..."}',
         ]
         events = progress_stream.stream_output(output, StringIO())
-        self.assertEqual(len(events), 1)
+        assert len(events) == 1
 
     def test_stream_output_div_zero(self):
         output = [
@@ -24,7 +24,7 @@ class ProgressStreamTestCase(unittest.TestCase):
             b'"progress": "..."}',
         ]
         events = progress_stream.stream_output(output, StringIO())
-        self.assertEqual(len(events), 1)
+        assert len(events) == 1
 
     def test_stream_output_null_total(self):
         output = [
@@ -33,7 +33,7 @@ class ProgressStreamTestCase(unittest.TestCase):
             b'"progress": "..."}',
         ]
         events = progress_stream.stream_output(output, StringIO())
-        self.assertEqual(len(events), 1)
+        assert len(events) == 1
 
     def test_stream_output_progress_event_tty(self):
         events = [
@@ -46,7 +46,7 @@ class ProgressStreamTestCase(unittest.TestCase):
 
         output = TTYStringIO()
         events = progress_stream.stream_output(events, output)
-        self.assertTrue(len(output.getvalue()) > 0)
+        assert len(output.getvalue()) > 0
 
     def test_stream_output_progress_event_no_tty(self):
         events = [
@@ -55,7 +55,7 @@ class ProgressStreamTestCase(unittest.TestCase):
         output = StringIO()
 
         events = progress_stream.stream_output(events, output)
-        self.assertEqual(len(output.getvalue()), 0)
+        assert len(output.getvalue()) == 0
 
     def test_stream_output_no_progress_event_no_tty(self):
         events = [
@@ -64,7 +64,7 @@ class ProgressStreamTestCase(unittest.TestCase):
         output = StringIO()
 
         events = progress_stream.stream_output(events, output)
-        self.assertTrue(len(output.getvalue()) > 0)
+        assert len(output.getvalue()) > 0
 
 
 def test_get_digest_from_push():

+ 24 - 32
tests/unit/project_test.py

@@ -1,3 +1,4 @@
+# encoding: utf-8
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
@@ -14,6 +15,7 @@ from compose.const import COMPOSEFILE_V1 as V1
 from compose.const import COMPOSEFILE_V2_0 as V2_0
 from compose.const import LABEL_SERVICE
 from compose.container import Container
+from compose.project import NoSuchService
 from compose.project import Project
 from compose.service import ImageType
 from compose.service import Service
@@ -46,12 +48,12 @@ class ProjectTest(unittest.TestCase):
             config_data=config,
             client=None,
         )
-        self.assertEqual(len(project.services), 2)
-        self.assertEqual(project.get_service('web').name, 'web')
-        self.assertEqual(project.get_service('web').options['image'], 'busybox:latest')
-        self.assertEqual(project.get_service('db').name, 'db')
-        self.assertEqual(project.get_service('db').options['image'], 'busybox:latest')
-        self.assertFalse(project.networks.use_networking)
+        assert len(project.services) == 2
+        assert project.get_service('web').name == 'web'
+        assert project.get_service('web').options['image'] == 'busybox:latest'
+        assert project.get_service('db').name == 'db'
+        assert project.get_service('db').options['image'] == 'busybox:latest'
+        assert not project.networks.use_networking
 
     def test_from_config_v2(self):
         config = Config(
@@ -72,8 +74,8 @@ class ProjectTest(unittest.TestCase):
             configs=None,
         )
         project = Project.from_config('composetest', config, None)
-        self.assertEqual(len(project.services), 2)
-        self.assertTrue(project.networks.use_networking)
+        assert len(project.services) == 2
+        assert project.networks.use_networking
 
     def test_get_service(self):
         web = Service(
@@ -83,7 +85,7 @@ class ProjectTest(unittest.TestCase):
             image="busybox:latest",
         )
         project = Project('test', [web], None)
-        self.assertEqual(project.get_service('web'), web)
+        assert project.get_service('web') == web
 
     def test_get_services_returns_all_services_without_args(self):
         web = Service(
@@ -97,7 +99,7 @@ class ProjectTest(unittest.TestCase):
             image='foo',
         )
         project = Project('test', [web, console], None)
-        self.assertEqual(project.get_services(), [web, console])
+        assert project.get_services() == [web, console]
 
     def test_get_services_returns_listed_services_with_args(self):
         web = Service(
@@ -111,7 +113,7 @@ class ProjectTest(unittest.TestCase):
             image='foo',
         )
         project = Project('test', [web, console], None)
-        self.assertEqual(project.get_services(['console']), [console])
+        assert project.get_services(['console']) == [console]
 
     def test_get_services_with_include_links(self):
         db = Service(
@@ -137,10 +139,7 @@ class ProjectTest(unittest.TestCase):
             links=[(web, 'web')]
         )
         project = Project('test', [web, db, cache, console], None)
-        self.assertEqual(
-            project.get_services(['console'], include_deps=True),
-            [db, web, console]
-        )
+        assert project.get_services(['console'], include_deps=True) == [db, web, console]
 
     def test_get_services_removes_duplicates_following_links(self):
         db = Service(
@@ -155,10 +154,7 @@ class ProjectTest(unittest.TestCase):
             links=[(db, 'database')]
         )
         project = Project('test', [web, db], None)
-        self.assertEqual(
-            project.get_services(['web', 'db'], include_deps=True),
-            [db, web]
-        )
+        assert project.get_services(['web', 'db'], include_deps=True) == [db, web]
 
     def test_use_volumes_from_container(self):
         container_id = 'aabbccddee'
@@ -377,8 +373,8 @@ class ProjectTest(unittest.TestCase):
             ),
         )
         service = project.get_service('test')
-        self.assertEqual(service.network_mode.id, None)
-        self.assertNotIn('NetworkMode', service._get_container_host_config({}))
+        assert service.network_mode.id is None
+        assert 'NetworkMode' not in service._get_container_host_config({})
 
     def test_use_net_from_container(self):
         container_id = 'aabbccddee'
@@ -403,7 +399,7 @@ class ProjectTest(unittest.TestCase):
             ),
         )
         service = project.get_service('test')
-        self.assertEqual(service.network_mode.mode, 'container:' + container_id)
+        assert service.network_mode.mode == 'container:' + container_id
 
     def test_use_net_from_service(self):
         container_name = 'test_aaa_1'
@@ -439,7 +435,7 @@ class ProjectTest(unittest.TestCase):
         )
 
         service = project.get_service('test')
-        self.assertEqual(service.network_mode.mode, 'container:' + container_name)
+        assert service.network_mode.mode == 'container:' + container_name
 
     def test_uses_default_network_true(self):
         project = Project.from_config(
@@ -513,7 +509,7 @@ class ProjectTest(unittest.TestCase):
                 configs=None,
             ),
         )
-        self.assertEqual([c.id for c in project.containers()], ['1'])
+        assert [c.id for c in project.containers()] == ['1']
 
     def test_down_with_no_resources(self):
         project = Project.from_config(
@@ -537,14 +533,6 @@ class ProjectTest(unittest.TestCase):
         project.down(ImageType.all, True)
         self.mock_client.remove_image.assert_called_once_with("busybox:latest")
 
-    def test_warning_in_swarm_mode(self):
-        self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
-        project = Project('composetest', [], self.mock_client)
-
-        with mock.patch('compose.project.log') as fake_log:
-            project.up()
-            assert fake_log.warn.call_count == 1
-
     def test_no_warning_on_stop(self):
         self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
         project = Project('composetest', [], self.mock_client)
@@ -568,3 +556,7 @@ class ProjectTest(unittest.TestCase):
         with mock.patch('compose.project.log') as fake_log:
             project.up()
             assert fake_log.warn.call_count == 0
+
+    def test_no_such_service_unicode(self):
+        assert NoSuchService('十六夜 咲夜'.encode('utf-8')).msg == 'No such service: 十六夜 咲夜'
+        assert NoSuchService('十六夜 咲夜').msg == 'No such service: 十六夜 咲夜'

+ 79 - 100
tests/unit/service_test.py

@@ -46,14 +46,14 @@ class ServiceTest(unittest.TestCase):
     def test_containers(self):
         service = Service('db', self.mock_client, 'myproject', image='foo')
         self.mock_client.containers.return_value = []
-        self.assertEqual(list(service.containers()), [])
+        assert list(service.containers()) == []
 
     def test_containers_with_containers(self):
         self.mock_client.containers.return_value = [
             dict(Name=str(i), Image='foo', Id=i) for i in range(3)
         ]
         service = Service('db', self.mock_client, 'myproject', image='foo')
-        self.assertEqual([c.id for c in service.containers()], list(range(3)))
+        assert [c.id for c in service.containers()] == list(range(3))
 
         expected_labels = [
             '{0}=myproject'.format(LABEL_PROJECT),
@@ -73,9 +73,9 @@ class ServiceTest(unittest.TestCase):
         ]
         service = Service('db', self.mock_client, 'myproject', image='foo')
 
-        self.assertEqual([c.id for c in service.containers()], ['1'])
-        self.assertEqual(service._next_container_number(), 2)
-        self.assertEqual(service.get_container(1).id, '1')
+        assert [c.id for c in service.containers()] == ['1']
+        assert service._next_container_number() == 2
+        assert service.get_container(1).id == '1'
 
     def test_get_volumes_from_container(self):
         container_id = 'aabbccddee'
@@ -88,7 +88,7 @@ class ServiceTest(unittest.TestCase):
                     'rw',
                     'container')])
 
-        self.assertEqual(service._get_volumes_from(), [container_id + ':rw'])
+        assert service._get_volumes_from() == [container_id + ':rw']
 
     def test_get_volumes_from_container_read_only(self):
         container_id = 'aabbccddee'
@@ -101,7 +101,7 @@ class ServiceTest(unittest.TestCase):
                     'ro',
                     'container')])
 
-        self.assertEqual(service._get_volumes_from(), [container_id + ':ro'])
+        assert service._get_volumes_from() == [container_id + ':ro']
 
     def test_get_volumes_from_service_container_exists(self):
         container_ids = ['aabbccddee', '12345']
@@ -115,7 +115,7 @@ class ServiceTest(unittest.TestCase):
             volumes_from=[VolumeFromSpec(from_service, 'rw', 'service')],
             image='foo')
 
-        self.assertEqual(service._get_volumes_from(), [container_ids[0] + ":rw"])
+        assert service._get_volumes_from() == [container_ids[0] + ":rw"]
 
     def test_get_volumes_from_service_container_exists_with_flags(self):
         for mode in ['ro', 'rw', 'z', 'rw,z', 'z,rw']:
@@ -130,7 +130,7 @@ class ServiceTest(unittest.TestCase):
                 volumes_from=[VolumeFromSpec(from_service, mode, 'service')],
                 image='foo')
 
-            self.assertEqual(service._get_volumes_from(), [container_ids[0]])
+            assert service._get_volumes_from() == [container_ids[0]]
 
     def test_get_volumes_from_service_no_container(self):
         container_id = 'abababab'
@@ -144,7 +144,7 @@ class ServiceTest(unittest.TestCase):
             image='foo',
             volumes_from=[VolumeFromSpec(from_service, 'rw', 'service')])
 
-        self.assertEqual(service._get_volumes_from(), [container_id + ':rw'])
+        assert service._get_volumes_from() == [container_id + ':rw']
         from_service.create_container.assert_called_once_with()
 
     def test_memory_swap_limit(self):
@@ -159,22 +159,16 @@ class ServiceTest(unittest.TestCase):
             memswap_limit=2000000000)
         service._get_container_create_options({'some': 'overrides'}, 1)
 
-        self.assertTrue(self.mock_client.create_host_config.called)
-        self.assertEqual(
-            self.mock_client.create_host_config.call_args[1]['mem_limit'],
-            1000000000
-        )
-        self.assertEqual(
-            self.mock_client.create_host_config.call_args[1]['memswap_limit'],
-            2000000000
-        )
+        assert self.mock_client.create_host_config.called
+        assert self.mock_client.create_host_config.call_args[1]['mem_limit'] == 1000000000
+        assert self.mock_client.create_host_config.call_args[1]['memswap_limit'] == 2000000000
 
     def test_self_reference_external_link(self):
         service = Service(
             name='foo',
             external_links=['default_foo_1']
         )
-        with self.assertRaises(DependencyError):
+        with pytest.raises(DependencyError):
             service.get_container_name('foo', 1)
 
     def test_mem_reservation(self):
@@ -202,11 +196,8 @@ class ServiceTest(unittest.TestCase):
             cgroup_parent='test')
         service._get_container_create_options({'some': 'overrides'}, 1)
 
-        self.assertTrue(self.mock_client.create_host_config.called)
-        self.assertEqual(
-            self.mock_client.create_host_config.call_args[1]['cgroup_parent'],
-            'test'
-        )
+        assert self.mock_client.create_host_config.called
+        assert self.mock_client.create_host_config.call_args[1]['cgroup_parent'] == 'test'
 
     def test_log_opt(self):
         self.mock_client.create_host_config.return_value = {}
@@ -222,11 +213,10 @@ class ServiceTest(unittest.TestCase):
             logging=logging)
         service._get_container_create_options({'some': 'overrides'}, 1)
 
-        self.assertTrue(self.mock_client.create_host_config.called)
-        self.assertEqual(
-            self.mock_client.create_host_config.call_args[1]['log_config'],
-            {'Type': 'syslog', 'Config': {'syslog-address': 'tcp://192.168.0.42:123'}}
-        )
+        assert self.mock_client.create_host_config.called
+        assert self.mock_client.create_host_config.call_args[1]['log_config'] == {
+            'Type': 'syslog', 'Config': {'syslog-address': 'tcp://192.168.0.42:123'}
+        }
 
     def test_stop_grace_period(self):
         self.mock_client.api_version = '1.25'
@@ -237,7 +227,7 @@ class ServiceTest(unittest.TestCase):
             client=self.mock_client,
             stop_grace_period="1m35s")
         opts = service._get_container_create_options({'image': 'foo'}, 1)
-        self.assertEqual(opts['stop_timeout'], 95)
+        assert opts['stop_timeout'] == 95
 
     def test_split_domainname_none(self):
         service = Service(
@@ -246,8 +236,8 @@ class ServiceTest(unittest.TestCase):
             hostname='name.domain.tld',
             client=self.mock_client)
         opts = service._get_container_create_options({'image': 'foo'}, 1)
-        self.assertEqual(opts['hostname'], 'name.domain.tld', 'hostname')
-        self.assertFalse('domainname' in opts, 'domainname')
+        assert opts['hostname'] == 'name.domain.tld', 'hostname'
+        assert not ('domainname' in opts), 'domainname'
 
     def test_split_domainname_fqdn(self):
         self.mock_client.api_version = '1.22'
@@ -257,8 +247,8 @@ class ServiceTest(unittest.TestCase):
             image='foo',
             client=self.mock_client)
         opts = service._get_container_create_options({'image': 'foo'}, 1)
-        self.assertEqual(opts['hostname'], 'name', 'hostname')
-        self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
+        assert opts['hostname'] == 'name', 'hostname'
+        assert opts['domainname'] == 'domain.tld', 'domainname'
 
     def test_split_domainname_both(self):
         self.mock_client.api_version = '1.22'
@@ -269,8 +259,8 @@ class ServiceTest(unittest.TestCase):
             domainname='domain.tld',
             client=self.mock_client)
         opts = service._get_container_create_options({'image': 'foo'}, 1)
-        self.assertEqual(opts['hostname'], 'name', 'hostname')
-        self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
+        assert opts['hostname'] == 'name', 'hostname'
+        assert opts['domainname'] == 'domain.tld', 'domainname'
 
     def test_split_domainname_weird(self):
         self.mock_client.api_version = '1.22'
@@ -281,8 +271,8 @@ class ServiceTest(unittest.TestCase):
             image='foo',
             client=self.mock_client)
         opts = service._get_container_create_options({'image': 'foo'}, 1)
-        self.assertEqual(opts['hostname'], 'name.sub', 'hostname')
-        self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
+        assert opts['hostname'] == 'name.sub', 'hostname'
+        assert opts['domainname'] == 'domain.tld', 'domainname'
 
     def test_no_default_hostname_when_not_using_networking(self):
         service = Service(
@@ -292,7 +282,7 @@ class ServiceTest(unittest.TestCase):
             client=self.mock_client,
         )
         opts = service._get_container_create_options({'image': 'foo'}, 1)
-        self.assertIsNone(opts.get('hostname'))
+        assert opts.get('hostname') is None
 
     def test_get_container_create_options_with_name_option(self):
         service = Service(
@@ -305,7 +295,7 @@ class ServiceTest(unittest.TestCase):
             {'name': name},
             1,
             one_off=OneOffFilter.only)
-        self.assertEqual(opts['name'], name)
+        assert opts['name'] == name
 
     def test_get_container_create_options_does_not_mutate_options(self):
         labels = {'thing': 'real'}
@@ -328,12 +318,11 @@ class ServiceTest(unittest.TestCase):
             1,
             previous_container=prev_container)
 
-        self.assertEqual(service.options['labels'], labels)
-        self.assertEqual(service.options['environment'], environment)
+        assert service.options['labels'] == labels
+        assert service.options['environment'] == environment
 
-        self.assertEqual(
-            opts['labels'][LABEL_CONFIG_HASH],
-            '2524a06fcb3d781aa2c981fc40bcfa08013bb318e4273bfa388df22023e6f2aa')
+        assert opts['labels'][LABEL_CONFIG_HASH] == \
+            '2524a06fcb3d781aa2c981fc40bcfa08013bb318e4273bfa388df22023e6f2aa'
         assert opts['environment'] == ['also=real']
 
     def test_get_container_create_options_sets_affinity_with_binds(self):
@@ -385,7 +374,8 @@ class ServiceTest(unittest.TestCase):
         self.mock_client.containers.return_value = []
         service = Service('foo', client=self.mock_client, image='foo')
 
-        self.assertRaises(ValueError, service.get_container)
+        with pytest.raises(ValueError):
+            service.get_container()
 
     @mock.patch('compose.service.Container', autospec=True)
     def test_get_container(self, mock_container_class):
@@ -394,7 +384,7 @@ class ServiceTest(unittest.TestCase):
         service = Service('foo', image='foo', client=self.mock_client)
 
         container = service.get_container(number=2)
-        self.assertEqual(container, mock_container_class.from_ps.return_value)
+        assert container == mock_container_class.from_ps.return_value
         mock_container_class.from_ps.assert_called_once_with(
             self.mock_client, container_dict)
 
@@ -449,23 +439,17 @@ class ServiceTest(unittest.TestCase):
         mock_container.stop.assert_called_once_with(timeout=1)
 
     def test_parse_repository_tag(self):
-        self.assertEqual(parse_repository_tag("root"), ("root", "", ":"))
-        self.assertEqual(parse_repository_tag("root:tag"), ("root", "tag", ":"))
-        self.assertEqual(parse_repository_tag("user/repo"), ("user/repo", "", ":"))
-        self.assertEqual(parse_repository_tag("user/repo:tag"), ("user/repo", "tag", ":"))
-        self.assertEqual(parse_repository_tag("url:5000/repo"), ("url:5000/repo", "", ":"))
-        self.assertEqual(
-            parse_repository_tag("url:5000/repo:tag"),
-            ("url:5000/repo", "tag", ":"))
-        self.assertEqual(
-            parse_repository_tag("root@sha256:digest"),
-            ("root", "sha256:digest", "@"))
-        self.assertEqual(
-            parse_repository_tag("user/repo@sha256:digest"),
-            ("user/repo", "sha256:digest", "@"))
-        self.assertEqual(
-            parse_repository_tag("url:5000/repo@sha256:digest"),
-            ("url:5000/repo", "sha256:digest", "@"))
+        assert parse_repository_tag("root") == ("root", "", ":")
+        assert parse_repository_tag("root:tag") == ("root", "tag", ":")
+        assert parse_repository_tag("user/repo") == ("user/repo", "", ":")
+        assert parse_repository_tag("user/repo:tag") == ("user/repo", "tag", ":")
+        assert parse_repository_tag("url:5000/repo") == ("url:5000/repo", "", ":")
+        assert parse_repository_tag("url:5000/repo:tag") == ("url:5000/repo", "tag", ":")
+        assert parse_repository_tag("root@sha256:digest") == ("root", "sha256:digest", "@")
+        assert parse_repository_tag("user/repo@sha256:digest") == ("user/repo", "sha256:digest", "@")
+        assert parse_repository_tag("url:5000/repo@sha256:digest") == (
+            "url:5000/repo", "sha256:digest", "@"
+        )
 
     def test_create_container(self):
         service = Service('foo', client=self.mock_client, build={'context': '.'})
@@ -553,8 +537,8 @@ class ServiceTest(unittest.TestCase):
         service = Service('foo', client=self.mock_client, build={'context': '.'})
         service.build()
 
-        self.assertEqual(self.mock_client.build.call_count, 1)
-        self.assertFalse(self.mock_client.build.call_args[1]['pull'])
+        assert self.mock_client.build.call_count == 1
+        assert not self.mock_client.build.call_args[1]['pull']
 
     def test_build_with_override_build_args(self):
         self.mock_client.build.return_value = [
@@ -653,63 +637,63 @@ class ServiceTest(unittest.TestCase):
         service = Service(
             'foo',
             image='foo')
-        self.assertEqual(service.specifies_host_port(), False)
+        assert not service.specifies_host_port()
 
     def test_specifies_host_port_with_container_port(self):
         service = Service(
             'foo',
             image='foo',
             ports=["2000"])
-        self.assertEqual(service.specifies_host_port(), False)
+        assert not service.specifies_host_port()
 
     def test_specifies_host_port_with_host_port(self):
         service = Service(
             'foo',
             image='foo',
             ports=["1000:2000"])
-        self.assertEqual(service.specifies_host_port(), True)
+        assert service.specifies_host_port()
 
     def test_specifies_host_port_with_host_ip_no_port(self):
         service = Service(
             'foo',
             image='foo',
             ports=["127.0.0.1::2000"])
-        self.assertEqual(service.specifies_host_port(), False)
+        assert not service.specifies_host_port()
 
     def test_specifies_host_port_with_host_ip_and_port(self):
         service = Service(
             'foo',
             image='foo',
             ports=["127.0.0.1:1000:2000"])
-        self.assertEqual(service.specifies_host_port(), True)
+        assert service.specifies_host_port()
 
     def test_specifies_host_port_with_container_port_range(self):
         service = Service(
             'foo',
             image='foo',
             ports=["2000-3000"])
-        self.assertEqual(service.specifies_host_port(), False)
+        assert not service.specifies_host_port()
 
     def test_specifies_host_port_with_host_port_range(self):
         service = Service(
             'foo',
             image='foo',
             ports=["1000-2000:2000-3000"])
-        self.assertEqual(service.specifies_host_port(), True)
+        assert service.specifies_host_port()
 
     def test_specifies_host_port_with_host_ip_no_port_range(self):
         service = Service(
             'foo',
             image='foo',
             ports=["127.0.0.1::2000-3000"])
-        self.assertEqual(service.specifies_host_port(), False)
+        assert not service.specifies_host_port()
 
     def test_specifies_host_port_with_host_ip_and_port_range(self):
         service = Service(
             'foo',
             image='foo',
             ports=["127.0.0.1:1000-2000:2000-3000"])
-        self.assertEqual(service.specifies_host_port(), True)
+        assert service.specifies_host_port()
 
     def test_image_name_from_config(self):
         image_name = 'example/web:latest'
@@ -730,10 +714,10 @@ class ServiceTest(unittest.TestCase):
             ports=["8080:80"])
 
         service.scale(0)
-        self.assertFalse(mock_log.warn.called)
+        assert not mock_log.warn.called
 
         service.scale(1)
-        self.assertFalse(mock_log.warn.called)
+        assert not mock_log.warn.called
 
         service.scale(2)
         mock_log.warn.assert_called_once_with(
@@ -815,16 +799,16 @@ class NetTestCase(unittest.TestCase):
 
     def test_network_mode(self):
         network_mode = NetworkMode('host')
-        self.assertEqual(network_mode.id, 'host')
-        self.assertEqual(network_mode.mode, 'host')
-        self.assertEqual(network_mode.service_name, None)
+        assert network_mode.id == 'host'
+        assert network_mode.mode == 'host'
+        assert network_mode.service_name is None
 
     def test_network_mode_container(self):
         container_id = 'abcd'
         network_mode = ContainerNetworkMode(Container(None, {'Id': container_id}))
-        self.assertEqual(network_mode.id, container_id)
-        self.assertEqual(network_mode.mode, 'container:' + container_id)
-        self.assertEqual(network_mode.service_name, None)
+        assert network_mode.id == container_id
+        assert network_mode.mode == 'container:' + container_id
+        assert network_mode.service_name is None
 
     def test_network_mode_service(self):
         container_id = 'bbbb'
@@ -837,9 +821,9 @@ class NetTestCase(unittest.TestCase):
         service = Service(name=service_name, client=mock_client)
         network_mode = ServiceNetworkMode(service)
 
-        self.assertEqual(network_mode.id, service_name)
-        self.assertEqual(network_mode.mode, 'container:' + container_id)
-        self.assertEqual(network_mode.service_name, service_name)
+        assert network_mode.id == service_name
+        assert network_mode.mode == 'container:' + container_id
+        assert network_mode.service_name == service_name
 
     def test_network_mode_service_no_containers(self):
         service_name = 'web'
@@ -849,9 +833,9 @@ class NetTestCase(unittest.TestCase):
         service = Service(name=service_name, client=mock_client)
         network_mode = ServiceNetworkMode(service)
 
-        self.assertEqual(network_mode.id, service_name)
-        self.assertEqual(network_mode.mode, None)
-        self.assertEqual(network_mode.service_name, service_name)
+        assert network_mode.id == service_name
+        assert network_mode.mode is None
+        assert network_mode.service_name == service_name
 
 
 class ServicePortsTest(unittest.TestCase):
@@ -1002,13 +986,10 @@ class ServiceVolumesTest(unittest.TestCase):
             number=1,
         )
 
-        self.assertEqual(
-            set(self.mock_client.create_host_config.call_args[1]['binds']),
-            set([
-                '/host/path:/data1:rw',
-                '/host/path:/data2:rw',
-            ]),
-        )
+        assert set(self.mock_client.create_host_config.call_args[1]['binds']) == set([
+            '/host/path:/data1:rw',
+            '/host/path:/data2:rw',
+        ])
 
     def test_get_container_create_options_with_different_host_path_in_container_json(self):
         service = Service(
@@ -1113,9 +1094,7 @@ class ServiceVolumesTest(unittest.TestCase):
         ).create_container()
 
         assert self.mock_client.create_container.call_count == 1
-        self.assertEqual(
-            self.mock_client.create_host_config.call_args[1]['binds'],
-            [volume])
+        assert self.mock_client.create_host_config.call_args[1]['binds'] == [volume]
 
 
 class ServiceSecretTest(unittest.TestCase):

+ 2 - 2
tests/unit/split_buffer_test.py

@@ -50,5 +50,5 @@ class SplitBufferTest(unittest.TestCase):
         split = split_buffer(reader())
 
         for (actual, expected) in zip(split, expectations):
-            self.assertEqual(type(actual), type(expected))
-            self.assertEqual(actual, expected)
+            assert type(actual) == type(expected)
+            assert actual == expected

Einige Dateien werden nicht angezeigt, da zu viele Dateien in diesem Diff geändert wurden.