View source code

Merge pull request #4750 from docker/bump-1.13.0-rc1

Bump 1.13.0 rc1
Joffrey F 8 years ago
parent
commit
f4328ccf6d

+ 48 - 1
CHANGELOG.md

@@ -1,6 +1,53 @@
 Change log
 ==========
 
+1.13.0 (2017-05-01)
+-------------------
+
+### Breaking changes
+
+- `docker-compose up` now resets a service's scaling to its default value.
+  You can use the newly introduced `--scale` option to specify a custom
+  scale value
+
+### New features
+
+#### Compose file version 2.2
+
+- Introduced version 2.2 of the `docker-compose.yml` specification. This
+  version requires Docker Engine 1.13.0 or above
+
+- Added support for `init` in service definitions.
+
+- Added support for `scale` in service definitions. The configuration's value
+  can be overridden using the `--scale` flag in `docker-compose up`.
+  Please note that the `scale` command is disabled for this file format
+
+#### Compose file version 2.x
+
+- Added support for `options` in the `ipam` section of network definitions
+
+### Bugfixes
+
+- Fixed a bug where paths provided to compose via the `-f` option were not
+  being resolved properly
+
+- Fixed a bug where the `ext_ip::target_port` notation in the ports section
+  was incorrectly marked as invalid
+
+- Fixed an issue where the `exec` command would sometimes not return control
+  to the terminal when using the `-d` flag
+
+- Fixed a bug where secrets were missing from the output of the `config`
+  command for v3.2 files
+
+- Fixed an issue where `docker-compose` would hang if no internet connection
+  was available
+
+- Fixed an issue where paths containing unicode characters passed via the `-f`
+  flag were causing Compose to crash
+
+
 1.12.0 (2017-04-04)
 -------------------
 
@@ -8,7 +55,7 @@ Change log
 
 #### Compose file version 3.2
 
-- Introduced version 3.2 of the `docker-compose.yml` specification.
+- Introduced version 3.2 of the `docker-compose.yml` specification
 
 - Added support for `cache_from` in the `build` section of services
 

+ 1 - 1
compose/__init__.py

@@ -1,4 +1,4 @@
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
-__version__ = '1.12.0'
+__version__ = '1.13.0-rc1'

+ 6 - 1
compose/cli/__init__.py

@@ -2,6 +2,7 @@ from __future__ import absolute_import
 from __future__ import print_function
 from __future__ import unicode_literals
 
+import os
 import subprocess
 import sys
 
@@ -12,8 +13,12 @@ try:
     # https://github.com/docker/compose/issues/4425
     # https://github.com/docker/compose/issues/4481
     # https://github.com/pypa/pip/blob/master/pip/_vendor/__init__.py
+    env = os.environ.copy()
+    env[str('PIP_DISABLE_PIP_VERSION_CHECK')] = str('1')
+
     s_cmd = subprocess.Popen(
-        ['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE
+        ['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE,
+        env=env
     )
     packages = s_cmd.communicate()[0].splitlines()
     dockerpy_installed = len(

+ 5 - 2
compose/cli/command.py

@@ -49,14 +49,17 @@ def get_config_from_options(base_dir, options):
 
 
 def get_config_path_from_options(base_dir, options, environment):
+    def unicode_paths(paths):
+        return [p.decode('utf-8') if isinstance(p, six.binary_type) else p for p in paths]
+
     file_option = options.get('--file')
     if file_option:
-        return file_option
+        return unicode_paths(file_option)
 
     config_files = environment.get('COMPOSE_FILE')
     if config_files:
         pathsep = environment.get('COMPOSE_PATH_SEPARATOR', os.pathsep)
-        return config_files.split(pathsep)
+        return unicode_paths(config_files.split(pathsep))
     return None
 
 

+ 32 - 13
compose/cli/main.py

@@ -26,6 +26,7 @@ from ..config import resolve_build_args
 from ..config.environment import Environment
 from ..config.serialize import serialize_config
 from ..config.types import VolumeSpec
+from ..const import COMPOSEFILE_V2_2 as V2_2
 from ..const import IS_WINDOWS_PLATFORM
 from ..errors import StreamParseError
 from ..progress_stream import StreamOutputError
@@ -439,7 +440,7 @@ class TopLevelCommand(object):
         exec_id = container.create_exec(command, **create_exec_options)
 
         if detach:
-            container.start_exec(exec_id, tty=tty)
+            container.start_exec(exec_id, tty=tty, stream=True)
             return
 
         signals.set_signal_handler_to_shutdown()
@@ -771,15 +772,13 @@ class TopLevelCommand(object):
         """
         timeout = timeout_from_opts(options)
 
-        for s in options['SERVICE=NUM']:
-            if '=' not in s:
-                raise UserError('Arguments to scale should be in the form service=num')
-            service_name, num = s.split('=', 1)
-            try:
-                num = int(num)
-            except ValueError:
-                raise UserError('Number of containers for service "%s" is not a '
-                                'number' % service_name)
+        if self.project.config_version == V2_2:
+            raise UserError(
+                'The scale command is incompatible with the v2.2 format. '
+                'Use the up command with the --scale flag instead.'
+            )
+
+        for service_name, num in parse_scale_args(options['SERVICE=NUM']).items():
             self.project.get_service(service_name).scale(num, timeout=timeout)
 
     def start(self, options):
@@ -875,7 +874,7 @@ class TopLevelCommand(object):
         If you want to force Compose to stop and recreate all containers, use the
         `--force-recreate` flag.
 
-        Usage: up [options] [SERVICE...]
+        Usage: up [options] [--scale SERVICE=NUM...] [SERVICE...]
 
         Options:
             -d                         Detached mode: Run containers in the background,
@@ -898,7 +897,9 @@ class TopLevelCommand(object):
             --remove-orphans           Remove containers for services not
                                        defined in the Compose file
             --exit-code-from SERVICE   Return the exit code of the selected service container.
-                                       Requires --abort-on-container-exit.
+                                       Implies --abort-on-container-exit.
+            --scale SERVICE=NUM        Scale SERVICE to NUM instances. Overrides the `scale`
+                                       setting in the Compose file if present.
         """
         start_deps = not options['--no-deps']
         exit_value_from = exitval_from_opts(options, self.project)
@@ -919,7 +920,9 @@ class TopLevelCommand(object):
                 do_build=build_action_from_opts(options),
                 timeout=timeout,
                 detached=detached,
-                remove_orphans=remove_orphans)
+                remove_orphans=remove_orphans,
+                scale_override=parse_scale_args(options['--scale']),
+            )
 
             if detached:
                 return
@@ -1238,3 +1241,19 @@ def call_docker(args):
     log.debug(" ".join(map(pipes.quote, args)))
 
     return subprocess.call(args)
+
+
+def parse_scale_args(options):
+    res = {}
+    for s in options:
+        if '=' not in s:
+            raise UserError('Arguments to scale should be in the form service=num')
+        service_name, num = s.split('=', 1)
+        try:
+            num = int(num)
+        except ValueError:
+            raise UserError(
+                'Number of containers for service "%s" is not a number' % service_name
+            )
+        res[service_name] = num
+    return res

+ 4 - 3
compose/config/config.py

@@ -108,6 +108,7 @@ ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
     'log_opt',
     'logging',
     'network_mode',
+    'init',
 ]
 
 DOCKER_VALID_URL_PREFIXES = (
@@ -234,10 +235,10 @@ class ServiceConfig(namedtuple('_ServiceConfig', 'working_dir filename name conf
             config)
 
 
-def find(base_dir, filenames, environment, override_dir='.'):
+def find(base_dir, filenames, environment, override_dir=None):
     if filenames == ['-']:
         return ConfigDetails(
-            os.path.abspath(override_dir),
+            os.path.abspath(override_dir) if override_dir else os.getcwd(),
             [ConfigFile(None, yaml.safe_load(sys.stdin))],
             environment
         )
@@ -249,7 +250,7 @@ def find(base_dir, filenames, environment, override_dir='.'):
 
     log.debug("Using configuration files: {}".format(",".join(filenames)))
     return ConfigDetails(
-        override_dir or os.path.dirname(filenames[0]),
+        override_dir if override_dir else os.path.dirname(filenames[0]),
         [ConfigFile.from_filename(f) for f in filenames],
         environment
     )

+ 7 - 0
compose/config/config_schema_v2.0.json

@@ -253,6 +253,13 @@
                 "driver": {"type": "string"},
                 "config": {
                     "type": "array"
+                },
+                "options": {
+                  "type": "object",
+                  "patternProperties": {
+                    "^.+$": {"type": "string"}
+                  },
+                  "additionalProperties": false
                 }
             },
             "additionalProperties": false

+ 7 - 0
compose/config/config_schema_v2.1.json

@@ -298,6 +298,13 @@
                 "driver": {"type": "string"},
                 "config": {
                     "type": "array"
+                },
+                "options": {
+                  "type": "object",
+                  "patternProperties": {
+                    "^.+$": {"type": "string"}
+                  },
+                  "additionalProperties": false
                 }
             },
             "additionalProperties": false

+ 387 - 0
compose/config/config_schema_v2.2.json

@@ -0,0 +1,387 @@
+{
+  "$schema": "http://json-schema.org/draft-04/schema#",
+  "id": "config_schema_v2.2.json",
+  "type": "object",
+
+  "properties": {
+    "version": {
+      "type": "string"
+    },
+
+    "services": {
+      "id": "#/properties/services",
+      "type": "object",
+      "patternProperties": {
+        "^[a-zA-Z0-9._-]+$": {
+          "$ref": "#/definitions/service"
+        }
+      },
+      "additionalProperties": false
+    },
+
+    "networks": {
+      "id": "#/properties/networks",
+      "type": "object",
+      "patternProperties": {
+        "^[a-zA-Z0-9._-]+$": {
+          "$ref": "#/definitions/network"
+        }
+      }
+    },
+
+    "volumes": {
+      "id": "#/properties/volumes",
+      "type": "object",
+      "patternProperties": {
+        "^[a-zA-Z0-9._-]+$": {
+          "$ref": "#/definitions/volume"
+        }
+      },
+      "additionalProperties": false
+    }
+  },
+
+  "additionalProperties": false,
+
+  "definitions": {
+
+    "service": {
+      "id": "#/definitions/service",
+      "type": "object",
+
+      "properties": {
+        "build": {
+          "oneOf": [
+            {"type": "string"},
+            {
+              "type": "object",
+              "properties": {
+                "context": {"type": "string"},
+                "dockerfile": {"type": "string"},
+                "args": {"$ref": "#/definitions/list_or_dict"}
+              },
+              "additionalProperties": false
+            }
+          ]
+        },
+        "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+        "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+        "cgroup_parent": {"type": "string"},
+        "command": {
+          "oneOf": [
+            {"type": "string"},
+            {"type": "array", "items": {"type": "string"}}
+          ]
+        },
+        "container_name": {"type": "string"},
+        "cpu_shares": {"type": ["number", "string"]},
+        "cpu_quota": {"type": ["number", "string"]},
+        "cpuset": {"type": "string"},
+        "depends_on": {
+          "oneOf": [
+            {"$ref": "#/definitions/list_of_strings"},
+            {
+              "type": "object",
+              "additionalProperties": false,
+              "patternProperties": {
+                "^[a-zA-Z0-9._-]+$": {
+                  "type": "object",
+                  "additionalProperties": false,
+                  "properties": {
+                    "condition": {
+                      "type": "string",
+                      "enum": ["service_started", "service_healthy"]
+                    }
+                  },
+                  "required": ["condition"]
+                }
+              }
+            }
+          ]
+        },
+        "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+        "dns_opt": {
+          "type": "array",
+          "items": {
+            "type": "string"
+          },
+          "uniqueItems": true
+        },
+        "dns": {"$ref": "#/definitions/string_or_list"},
+        "dns_search": {"$ref": "#/definitions/string_or_list"},
+        "domainname": {"type": "string"},
+        "entrypoint": {
+          "oneOf": [
+            {"type": "string"},
+            {"type": "array", "items": {"type": "string"}}
+          ]
+        },
+        "env_file": {"$ref": "#/definitions/string_or_list"},
+        "environment": {"$ref": "#/definitions/list_or_dict"},
+
+        "expose": {
+          "type": "array",
+          "items": {
+            "type": ["string", "number"],
+            "format": "expose"
+          },
+          "uniqueItems": true
+        },
+
+        "extends": {
+          "oneOf": [
+            {
+              "type": "string"
+            },
+            {
+              "type": "object",
+
+              "properties": {
+                "service": {"type": "string"},
+                "file": {"type": "string"}
+              },
+              "required": ["service"],
+              "additionalProperties": false
+            }
+          ]
+        },
+
+        "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+        "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+        "healthcheck": {"$ref": "#/definitions/healthcheck"},
+        "hostname": {"type": "string"},
+        "image": {"type": "string"},
+        "init": {"type": ["boolean", "string"]},
+        "ipc": {"type": "string"},
+        "isolation": {"type": "string"},
+        "labels": {"$ref": "#/definitions/list_or_dict"},
+        "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+        "logging": {
+            "type": "object",
+
+            "properties": {
+                "driver": {"type": "string"},
+                "options": {"type": "object"}
+            },
+            "additionalProperties": false
+        },
+
+        "mac_address": {"type": "string"},
+        "mem_limit": {"type": ["number", "string"]},
+        "mem_reservation": {"type": ["string", "integer"]},
+        "mem_swappiness": {"type": "integer"},
+        "memswap_limit": {"type": ["number", "string"]},
+        "network_mode": {"type": "string"},
+
+        "networks": {
+          "oneOf": [
+            {"$ref": "#/definitions/list_of_strings"},
+            {
+              "type": "object",
+              "patternProperties": {
+                "^[a-zA-Z0-9._-]+$": {
+                  "oneOf": [
+                    {
+                      "type": "object",
+                      "properties": {
+                        "aliases": {"$ref": "#/definitions/list_of_strings"},
+                        "ipv4_address": {"type": "string"},
+                        "ipv6_address": {"type": "string"},
+                        "link_local_ips": {"$ref": "#/definitions/list_of_strings"}
+                      },
+                      "additionalProperties": false
+                    },
+                    {"type": "null"}
+                  ]
+                }
+              },
+              "additionalProperties": false
+            }
+          ]
+        },
+        "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
+        "group_add": {
+            "type": "array",
+            "items": {
+                "type": ["string", "number"]
+            },
+            "uniqueItems": true
+        },
+        "pid": {"type": ["string", "null"]},
+
+        "ports": {
+          "type": "array",
+          "items": {
+            "type": ["string", "number"],
+            "format": "ports"
+          },
+          "uniqueItems": true
+        },
+
+        "privileged": {"type": "boolean"},
+        "read_only": {"type": "boolean"},
+        "restart": {"type": "string"},
+        "scale": {"type": "integer"},
+        "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+        "shm_size": {"type": ["number", "string"]},
+        "sysctls": {"$ref": "#/definitions/list_or_dict"},
+        "pids_limit": {"type": ["number", "string"]},
+        "stdin_open": {"type": "boolean"},
+        "stop_grace_period": {"type": "string", "format": "duration"},
+        "stop_signal": {"type": "string"},
+        "tmpfs": {"$ref": "#/definitions/string_or_list"},
+        "tty": {"type": "boolean"},
+        "ulimits": {
+          "type": "object",
+          "patternProperties": {
+            "^[a-z]+$": {
+              "oneOf": [
+                {"type": "integer"},
+                {
+                  "type":"object",
+                  "properties": {
+                    "hard": {"type": "integer"},
+                    "soft": {"type": "integer"}
+                  },
+                  "required": ["soft", "hard"],
+                  "additionalProperties": false
+                }
+              ]
+            }
+          }
+        },
+        "user": {"type": "string"},
+        "userns_mode": {"type": "string"},
+        "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+        "volume_driver": {"type": "string"},
+        "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+        "working_dir": {"type": "string"}
+      },
+
+      "dependencies": {
+        "memswap_limit": ["mem_limit"]
+      },
+      "additionalProperties": false
+    },
+
+    "healthcheck": {
+      "id": "#/definitions/healthcheck",
+      "type": "object",
+      "additionalProperties": false,
+      "properties": {
+        "disable": {"type": "boolean"},
+        "interval": {"type": "string"},
+        "retries": {"type": "number"},
+        "test": {
+          "oneOf": [
+            {"type": "string"},
+            {"type": "array", "items": {"type": "string"}}
+          ]
+        },
+        "timeout": {"type": "string"}
+      }
+    },
+
+    "network": {
+      "id": "#/definitions/network",
+      "type": "object",
+      "properties": {
+        "driver": {"type": "string"},
+        "driver_opts": {
+          "type": "object",
+          "patternProperties": {
+            "^.+$": {"type": ["string", "number"]}
+          }
+        },
+        "ipam": {
+            "type": "object",
+            "properties": {
+                "driver": {"type": "string"},
+                "config": {
+                    "type": "array"
+                }
+            },
+            "additionalProperties": false
+        },
+        "external": {
+          "type": ["boolean", "object"],
+          "properties": {
+            "name": {"type": "string"}
+          },
+          "additionalProperties": false
+        },
+        "internal": {"type": "boolean"},
+        "enable_ipv6": {"type": "boolean"},
+        "labels": {"$ref": "#/definitions/list_or_dict"}
+      },
+      "additionalProperties": false
+    },
+
+    "volume": {
+      "id": "#/definitions/volume",
+      "type": ["object", "null"],
+      "properties": {
+        "driver": {"type": "string"},
+        "driver_opts": {
+          "type": "object",
+          "patternProperties": {
+            "^.+$": {"type": ["string", "number"]}
+          }
+        },
+        "external": {
+          "type": ["boolean", "object"],
+          "properties": {
+            "name": {"type": "string"}
+          },
+          "additionalProperties": false
+        },
+        "labels": {"$ref": "#/definitions/list_or_dict"}
+      },
+      "additionalProperties": false
+    },
+
+    "string_or_list": {
+      "oneOf": [
+        {"type": "string"},
+        {"$ref": "#/definitions/list_of_strings"}
+      ]
+    },
+
+    "list_of_strings": {
+      "type": "array",
+      "items": {"type": "string"},
+      "uniqueItems": true
+    },
+
+    "list_or_dict": {
+      "oneOf": [
+        {
+          "type": "object",
+          "patternProperties": {
+            ".+": {
+              "type": ["string", "number", "null"]
+            }
+          },
+          "additionalProperties": false
+        },
+        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+      ]
+    },
+
+    "constraints": {
+      "service": {
+        "id": "#/definitions/constraints/service",
+        "anyOf": [
+          {"required": ["build"]},
+          {"required": ["image"]}
+        ],
+        "properties": {
+          "build": {
+            "required": ["context"]
+          }
+        }
+      }
+    }
+  }
+}

+ 7 - 6
compose/config/serialize.py

@@ -7,8 +7,9 @@ import yaml
 from compose.config import types
 from compose.const import COMPOSEFILE_V1 as V1
 from compose.const import COMPOSEFILE_V2_1 as V2_1
+from compose.const import COMPOSEFILE_V2_2 as V2_2
 from compose.const import COMPOSEFILE_V3_1 as V3_1
-from compose.const import COMPOSEFILE_V3_1 as V3_2
+from compose.const import COMPOSEFILE_V3_2 as V3_2
 
 
 def serialize_config_type(dumper, data):
@@ -95,7 +96,7 @@ def denormalize_service_dict(service_dict, version, image_digest=None):
     if version == V1 and 'network_mode' not in service_dict:
         service_dict['network_mode'] = 'bridge'
 
-    if 'depends_on' in service_dict and version != V2_1:
+    if 'depends_on' in service_dict and version not in (V2_1, V2_2):
         service_dict['depends_on'] = sorted([
             svc for svc in service_dict['depends_on'].keys()
         ])
@@ -111,9 +112,9 @@ def denormalize_service_dict(service_dict, version, image_digest=None):
             )
 
     if 'ports' in service_dict and version not in (V3_2,):
-        service_dict['ports'] = map(
-            lambda p: p.legacy_repr() if isinstance(p, types.ServicePort) else p,
-            service_dict['ports']
-        )
+        service_dict['ports'] = [
+            p.legacy_repr() if isinstance(p, types.ServicePort) else p
+            for p in service_dict['ports']
+        ]
 
     return service_dict

+ 2 - 2
compose/config/types.py

@@ -267,7 +267,7 @@ class ServicePort(namedtuple('_ServicePort', 'target published protocol mode ext
     @classmethod
     def parse(cls, spec):
         if isinstance(spec, cls):
-            # WHen extending a service with ports, the port definitions have already been parsed
+            # When extending a service with ports, the port definitions have already been parsed
             return [spec]
 
         if not isinstance(spec, dict):
@@ -316,7 +316,7 @@ class ServicePort(namedtuple('_ServicePort', 'target published protocol mode ext
 def normalize_port_dict(port):
     return '{external_ip}{has_ext_ip}{published}{is_pub}{target}/{protocol}'.format(
         published=port.get('published', ''),
-        is_pub=(':' if port.get('published') else ''),
+        is_pub=(':' if port.get('published') or port.get('external_ip') else ''),
         target=port.get('target'),
         protocol=port.get('protocol', 'tcp'),
         external_ip=port.get('external_ip', ''),

+ 3 - 0
compose/const.py

@@ -21,6 +21,7 @@ SECRETS_PATH = '/run/secrets'
 COMPOSEFILE_V1 = '1'
 COMPOSEFILE_V2_0 = '2.0'
 COMPOSEFILE_V2_1 = '2.1'
+COMPOSEFILE_V2_2 = '2.2'
 
 COMPOSEFILE_V3_0 = '3.0'
 COMPOSEFILE_V3_1 = '3.1'
@@ -30,6 +31,7 @@ API_VERSIONS = {
     COMPOSEFILE_V1: '1.21',
     COMPOSEFILE_V2_0: '1.22',
     COMPOSEFILE_V2_1: '1.24',
+    COMPOSEFILE_V2_2: '1.25',
     COMPOSEFILE_V3_0: '1.25',
     COMPOSEFILE_V3_1: '1.25',
     COMPOSEFILE_V3_2: '1.25',
@@ -39,6 +41,7 @@ API_VERSION_TO_ENGINE_VERSION = {
     API_VERSIONS[COMPOSEFILE_V1]: '1.9.0',
     API_VERSIONS[COMPOSEFILE_V2_0]: '1.10.0',
     API_VERSIONS[COMPOSEFILE_V2_1]: '1.12.0',
+    API_VERSIONS[COMPOSEFILE_V2_2]: '1.13.0',
     API_VERSIONS[COMPOSEFILE_V3_0]: '1.13.0',
     API_VERSIONS[COMPOSEFILE_V3_1]: '1.13.0',
     API_VERSIONS[COMPOSEFILE_V3_2]: '1.13.0',

+ 7 - 0
compose/network.py

@@ -123,6 +123,7 @@ def create_ipam_config_from_dict(ipam_dict):
             )
             for config in ipam_dict.get('config', [])
         ],
+        options=ipam_dict.get('options')
     )
 
 
@@ -157,6 +158,12 @@ def check_remote_ipam_config(remote, local):
             if sorted(lc.get('AuxiliaryAddresses')) != sorted(rc.get('AuxiliaryAddresses')):
                 raise NetworkConfigChangedError(local.full_name, 'IPAM config aux_addresses')
 
+    remote_opts = remote_ipam.get('Options', {})
+    local_opts = local.ipam.get('options', {})
+    for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
+        if remote_opts.get(k) != local_opts.get(k):
+            raise NetworkConfigChangedError(local.full_name, 'IPAM option "{}"'.format(k))
+
 
 def check_remote_network_config(remote, local):
     if local.driver and remote.get('Driver') != local.driver:

+ 0 - 4
compose/parallel.py

@@ -260,10 +260,6 @@ def parallel_remove(containers, options):
     parallel_operation(stopped_containers, 'remove', options, 'Removing')
 
 
-def parallel_start(containers, options):
-    parallel_operation(containers, 'start', options, 'Starting')
-
-
 def parallel_pause(containers, options):
     parallel_operation(containers, 'pause', options, 'Pausing')
 

+ 17 - 8
compose/project.py

@@ -57,12 +57,13 @@ class Project(object):
     """
     A collection of services.
     """
-    def __init__(self, name, services, client, networks=None, volumes=None):
+    def __init__(self, name, services, client, networks=None, volumes=None, config_version=None):
         self.name = name
         self.services = services
         self.client = client
         self.volumes = volumes or ProjectVolumes({})
         self.networks = networks or ProjectNetworks({}, False)
+        self.config_version = config_version
 
     def labels(self, one_off=OneOffFilter.exclude):
         labels = ['{0}={1}'.format(LABEL_PROJECT, self.name)]
@@ -82,7 +83,7 @@ class Project(object):
             networks,
             use_networking)
         volumes = ProjectVolumes.from_config(name, config_data, client)
-        project = cls(name, [], client, project_networks, volumes)
+        project = cls(name, [], client, project_networks, volumes, config_data.version)
 
         for service_dict in config_data.services:
             service_dict = dict(service_dict)
@@ -380,13 +381,17 @@ class Project(object):
            do_build=BuildAction.none,
            timeout=None,
            detached=False,
-           remove_orphans=False):
+           remove_orphans=False,
+           scale_override=None):
 
         warn_for_swarm_mode(self.client)
 
         self.initialize()
         self.find_orphan_containers(remove_orphans)
 
+        if scale_override is None:
+            scale_override = {}
+
         services = self.get_services_without_duplicate(
             service_names,
             include_deps=start_deps)
@@ -399,7 +404,8 @@ class Project(object):
             return service.execute_convergence_plan(
                 plans[service.name],
                 timeout=timeout,
-                detached=detached
+                detached=detached,
+                scale_override=scale_override.get(service.name)
             )
 
         def get_deps(service):
@@ -589,10 +595,13 @@ def get_secrets(service, service_secrets, secret_defs):
             continue
 
         if secret.uid or secret.gid or secret.mode:
-            log.warn("Service \"{service}\" uses secret \"{secret}\" with uid, "
-                     "gid, or mode. These fields are not supported by this "
-                     "implementation of the Compose file".format(
-                        service=service, secret=secret.source))
+            log.warn(
+                "Service \"{service}\" uses secret \"{secret}\" with uid, "
+                "gid, or mode. These fields are not supported by this "
+                "implementation of the Compose file".format(
+                    service=service, secret=secret.source
+                )
+            )
 
         secrets.append({'secret': secret, 'file': secret_def.get('file')})
 

+ 130 - 93
compose/service.py

@@ -38,7 +38,6 @@ from .errors import HealthCheckFailed
 from .errors import NoHealthCheckConfigured
 from .errors import OperationFailedError
 from .parallel import parallel_execute
-from .parallel import parallel_start
 from .progress_stream import stream_output
 from .progress_stream import StreamOutputError
 from .utils import json_hash
@@ -48,7 +47,7 @@ from .utils import parse_seconds_float
 log = logging.getLogger(__name__)
 
 
-DOCKER_START_KEYS = [
+HOST_CONFIG_KEYS = [
     'cap_add',
     'cap_drop',
     'cgroup_parent',
@@ -60,6 +59,7 @@ DOCKER_START_KEYS = [
     'env_file',
     'extra_hosts',
     'group_add',
+    'init',
     'ipc',
     'read_only',
     'log_driver',
@@ -147,6 +147,7 @@ class Service(object):
         network_mode=None,
         networks=None,
         secrets=None,
+        scale=None,
         **options
     ):
         self.name = name
@@ -158,6 +159,7 @@ class Service(object):
         self.network_mode = network_mode or NetworkMode(None)
         self.networks = networks or {}
         self.secrets = secrets or []
+        self.scale_num = scale or 1
         self.options = options
 
     def __repr__(self):
@@ -188,16 +190,7 @@ class Service(object):
             self.start_container_if_stopped(c, **options)
         return containers
 
-    def scale(self, desired_num, timeout=None):
-        """
-        Adjusts the number of containers to the specified number and ensures
-        they are running.
-
-        - creates containers until there are at least `desired_num`
-        - stops containers until there are at most `desired_num` running
-        - starts containers until there are at least `desired_num` running
-        - removes all stopped containers
-        """
+    def show_scale_warnings(self, desired_num):
         if self.custom_container_name and desired_num > 1:
             log.warn('The "%s" service is using the custom container name "%s". '
                      'Docker requires each container to have a unique name. '
@@ -209,14 +202,18 @@ class Service(object):
                      'for this service are created on a single host, the port will clash.'
                      % self.name)
 
-        def create_and_start(service, number):
-            container = service.create_container(number=number, quiet=True)
-            service.start_container(container)
-            return container
+    def scale(self, desired_num, timeout=None):
+        """
+        Adjusts the number of containers to the specified number and ensures
+        they are running.
 
-        def stop_and_remove(container):
-            container.stop(timeout=self.stop_timeout(timeout))
-            container.remove()
+        - creates containers until there are at least `desired_num`
+        - stops containers until there are at most `desired_num` running
+        - starts containers until there are at least `desired_num` running
+        - removes all stopped containers
+        """
+
+        self.show_scale_warnings(desired_num)
 
         running_containers = self.containers(stopped=False)
         num_running = len(running_containers)
@@ -227,11 +224,10 @@ class Service(object):
             return
 
         if desired_num > num_running:
-            # we need to start/create until we have desired_num
             all_containers = self.containers(stopped=True)
 
             if num_running != len(all_containers):
-                # we have some stopped containers, let's start them up again
+                # we have some stopped containers, check for divergences
                 stopped_containers = [
                     c for c in all_containers if not c.is_running
                 ]
@@ -240,38 +236,14 @@ class Service(object):
                 divergent_containers = [
                     c for c in stopped_containers if self._containers_have_diverged([c])
                 ]
-                stopped_containers = sorted(
-                    set(stopped_containers) - set(divergent_containers),
-                    key=attrgetter('number')
-                )
                 for c in divergent_containers:
                         c.remove()
 
-                num_stopped = len(stopped_containers)
-
-                if num_stopped + num_running > desired_num:
-                    num_to_start = desired_num - num_running
-                    containers_to_start = stopped_containers[:num_to_start]
-                else:
-                    containers_to_start = stopped_containers
+                all_containers = list(set(all_containers) - set(divergent_containers))
 
-                parallel_start(containers_to_start, {})
-
-                num_running += len(containers_to_start)
-
-            num_to_create = desired_num - num_running
-            next_number = self._next_container_number()
-            container_numbers = [
-                number for number in range(
-                    next_number, next_number + num_to_create
-                )
-            ]
-
-            parallel_execute(
-                container_numbers,
-                lambda n: create_and_start(service=self, number=n),
-                lambda n: self.get_container_name(n),
-                "Creating and starting"
+            sorted_containers = sorted(all_containers, key=attrgetter('number'))
+            self._execute_convergence_start(
+                sorted_containers, desired_num, timeout, True, True
             )
 
         if desired_num < num_running:
@@ -281,12 +253,7 @@ class Service(object):
                 running_containers,
                 key=attrgetter('number'))
 
-            parallel_execute(
-                sorted_running_containers[-num_to_stop:],
-                stop_and_remove,
-                lambda c: c.name,
-                "Stopping and removing",
-            )
+            self._downscale(sorted_running_containers[-num_to_stop:], timeout)
 
     def create_container(self,
                          one_off=False,
@@ -399,51 +366,120 @@ class Service(object):
 
         return has_diverged
 
-    def execute_convergence_plan(self,
-                                 plan,
-                                 timeout=None,
-                                 detached=False,
-                                 start=True):
-        (action, containers) = plan
-        should_attach_logs = not detached
+    def _execute_convergence_create(self, scale, detached, start):
+            i = self._next_container_number()
 
-        if action == 'create':
-            container = self.create_container()
+            def create_and_start(service, n):
+                container = service.create_container(number=n)
+                if not detached:
+                    container.attach_log_stream()
+                if start:
+                    self.start_container(container)
+                return container
 
-            if should_attach_logs:
-                container.attach_log_stream()
+            containers, errors = parallel_execute(
+                range(i, i + scale),
+                lambda n: create_and_start(self, n),
+                lambda n: self.get_container_name(n),
+                "Creating"
+            )
+            for error in errors.values():
+                raise OperationFailedError(error)
 
-            if start:
-                self.start_container(container)
+            return containers
 
-            return [container]
+    def _execute_convergence_recreate(self, containers, scale, timeout, detached, start):
+            if len(containers) > scale:
+                self._downscale(containers[scale:], timeout)
+                containers = containers[:scale]
 
-        elif action == 'recreate':
-            return [
-                self.recreate_container(
-                    container,
-                    timeout=timeout,
-                    attach_logs=should_attach_logs,
+            def recreate(container):
+                return self.recreate_container(
+                    container, timeout=timeout, attach_logs=not detached,
                     start_new_container=start
                 )
-                for container in containers
-            ]
+            containers, errors = parallel_execute(
+                containers,
+                recreate,
+                lambda c: c.name,
+                "Recreating"
+            )
+            for error in errors.values():
+                raise OperationFailedError(error)
 
-        elif action == 'start':
+            if len(containers) < scale:
+                containers.extend(self._execute_convergence_create(
+                    scale - len(containers), detached, start
+                ))
+            return containers
+
+    def _execute_convergence_start(self, containers, scale, timeout, detached, start):
+            if len(containers) > scale:
+                self._downscale(containers[scale:], timeout)
+                containers = containers[:scale]
             if start:
-                for container in containers:
-                    self.start_container_if_stopped(container, attach_logs=should_attach_logs)
+                _, errors = parallel_execute(
+                    containers,
+                    lambda c: self.start_container_if_stopped(c, attach_logs=not detached),
+                    lambda c: c.name,
+                    "Starting"
+                )
+
+                for error in errors.values():
+                    raise OperationFailedError(error)
 
+            if len(containers) < scale:
+                containers.extend(self._execute_convergence_create(
+                    scale - len(containers), detached, start
+                ))
             return containers
 
-        elif action == 'noop':
+    def _downscale(self, containers, timeout=None):
+        def stop_and_remove(container):
+            container.stop(timeout=self.stop_timeout(timeout))
+            container.remove()
+
+        parallel_execute(
+            containers,
+            stop_and_remove,
+            lambda c: c.name,
+            "Stopping and removing",
+        )
+
+    def execute_convergence_plan(self, plan, timeout=None, detached=False,
+                                 start=True, scale_override=None):
+        (action, containers) = plan
+        scale = scale_override if scale_override is not None else self.scale_num
+        containers = sorted(containers, key=attrgetter('number'))
+
+        self.show_scale_warnings(scale)
+
+        if action == 'create':
+            return self._execute_convergence_create(
+                scale, detached, start
+            )
+
+        if action == 'recreate':
+            return self._execute_convergence_recreate(
+                containers, scale, timeout, detached, start
+            )
+
+        if action == 'start':
+            return self._execute_convergence_start(
+                containers, scale, timeout, detached, start
+            )
+
+        if action == 'noop':
+            if scale != len(containers):
+                return self._execute_convergence_start(
+                    containers, scale, timeout, detached, start
+                )
             for c in containers:
                 log.info("%s is up-to-date" % c.name)
 
             return containers
 
-        else:
-            raise Exception("Invalid action: {}".format(action))
+        raise Exception("Invalid action: {}".format(action))
 
     def recreate_container(
             self,
@@ -729,8 +765,8 @@ class Service(object):
             number,
             self.config_hash if add_config_hash else None)
 
-        # Delete options which are only used when starting
-        for key in DOCKER_START_KEYS:
+        # Delete options which are only used in HostConfig
+        for key in HOST_CONFIG_KEYS:
             container_options.pop(key, None)
 
         container_options['host_config'] = self._get_container_host_config(
@@ -750,8 +786,12 @@ class Service(object):
 
         logging_dict = options.get('logging', None)
         log_config = get_log_config(logging_dict)
+        init_path = None
+        if isinstance(options.get('init'), six.string_types):
+            init_path = options.get('init')
+            options['init'] = True
 
-        host_config = self.client.create_host_config(
+        return self.client.create_host_config(
             links=self._get_links(link_to_self=one_off),
             port_bindings=build_port_bindings(
                 formatted_ports(options.get('ports', []))
@@ -786,15 +826,12 @@ class Service(object):
             oom_score_adj=options.get('oom_score_adj'),
             mem_swappiness=options.get('mem_swappiness'),
             group_add=options.get('group_add'),
-            userns_mode=options.get('userns_mode')
+            userns_mode=options.get('userns_mode'),
+            init=options.get('init', None),
+            init_path=init_path,
+            isolation=options.get('isolation'),
         )
 
-        # TODO: Add as an argument to create_host_config once it's supported
-        # in docker-py
-        host_config['Isolation'] = options.get('isolation')
-
-        return host_config
-
     def get_secret_volumes(self):
         def build_spec(secret):
             target = '{}/{}'.format(

+ 5 - 0
docker-compose.spec

@@ -32,6 +32,11 @@ exe = EXE(pyz,
                 'compose/config/config_schema_v2.1.json',
                 'DATA'
             ),
+            (
+                'compose/config/config_schema_v2.2.json',
+                'compose/config/config_schema_v2.2.json',
+                'DATA'
+            ),
             (
                 'compose/config/config_schema_v3.0.json',
                 'compose/config/config_schema_v3.0.json',

+ 1 - 1
script/run/run.sh

@@ -15,7 +15,7 @@
 
 set -e
 
-VERSION="1.12.0"
+VERSION="1.13.0-rc1"
 IMAGE="docker/compose:$VERSION"
 
 

+ 55 - 1
tests/acceptance/cli_test.py

@@ -151,7 +151,7 @@ class CLITestCase(DockerClientTestCase):
     def test_help(self):
         self.base_dir = 'tests/fixtures/no-composefile'
         result = self.dispatch(['help', 'up'], returncode=0)
-        assert 'Usage: up [options] [SERVICE...]' in result.stdout
+        assert 'Usage: up [options] [--scale SERVICE=NUM...] [SERVICE...]' in result.stdout
         # Prevent tearDown from trying to create a project
         self.base_dir = None
 
@@ -323,6 +323,7 @@ class CLITestCase(DockerClientTestCase):
         assert yaml.load(result.stdout) == {
             'version': '3.2',
             'networks': {},
+            'secrets': {},
             'volumes': {
                 'foobar': {
                     'labels': {
@@ -1865,6 +1866,59 @@ class CLITestCase(DockerClientTestCase):
         self.assertEqual(len(project.get_service('simple').containers()), 0)
         self.assertEqual(len(project.get_service('another').containers()), 0)
 
+    def test_scale_v2_2(self):
+        self.base_dir = 'tests/fixtures/scale'
+        result = self.dispatch(['scale', 'web=1'], returncode=1)
+        assert 'incompatible with the v2.2 format' in result.stderr
+
+    def test_up_scale_scale_up(self):
+        self.base_dir = 'tests/fixtures/scale'
+        project = self.project
+
+        self.dispatch(['up', '-d'])
+        assert len(project.get_service('web').containers()) == 2
+        assert len(project.get_service('db').containers()) == 1
+
+        self.dispatch(['up', '-d', '--scale', 'web=3'])
+        assert len(project.get_service('web').containers()) == 3
+        assert len(project.get_service('db').containers()) == 1
+
+    def test_up_scale_scale_down(self):
+        self.base_dir = 'tests/fixtures/scale'
+        project = self.project
+
+        self.dispatch(['up', '-d'])
+        assert len(project.get_service('web').containers()) == 2
+        assert len(project.get_service('db').containers()) == 1
+
+        self.dispatch(['up', '-d', '--scale', 'web=1'])
+        assert len(project.get_service('web').containers()) == 1
+        assert len(project.get_service('db').containers()) == 1
+
+    def test_up_scale_reset(self):
+        self.base_dir = 'tests/fixtures/scale'
+        project = self.project
+
+        self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'db=3'])
+        assert len(project.get_service('web').containers()) == 3
+        assert len(project.get_service('db').containers()) == 3
+
+        self.dispatch(['up', '-d'])
+        assert len(project.get_service('web').containers()) == 2
+        assert len(project.get_service('db').containers()) == 1
+
+    def test_up_scale_to_zero(self):
+        self.base_dir = 'tests/fixtures/scale'
+        project = self.project
+
+        self.dispatch(['up', '-d'])
+        assert len(project.get_service('web').containers()) == 2
+        assert len(project.get_service('db').containers()) == 1
+
+        self.dispatch(['up', '-d', '--scale', 'web=0', '--scale', 'db=0'])
+        assert len(project.get_service('web').containers()) == 0
+        assert len(project.get_service('db').containers()) == 0
+
     def test_port(self):
         self.base_dir = 'tests/fixtures/ports-composefile'
         self.dispatch(['up', '-d'], None)

+ 9 - 0
tests/fixtures/scale/docker-compose.yml

@@ -0,0 +1,9 @@
+version: '2.2'
+services:
+    web:
+        image: busybox
+        command: top
+        scale: 2
+    db:
+      image: busybox
+      command: top

+ 66 - 3
tests/integration/project_test.py

@@ -19,6 +19,7 @@ from compose.config.types import VolumeFromSpec
 from compose.config.types import VolumeSpec
 from compose.const import COMPOSEFILE_V2_0 as V2_0
 from compose.const import COMPOSEFILE_V2_1 as V2_1
+from compose.const import COMPOSEFILE_V2_2 as V2_2
 from compose.const import COMPOSEFILE_V3_1 as V3_1
 from compose.const import LABEL_PROJECT
 from compose.const import LABEL_SERVICE
@@ -564,12 +565,12 @@ class ProjectTest(DockerClientTestCase):
         self.assertEqual(len(service.containers()), 3)
         project.up()
         service = project.get_service('web')
-        self.assertEqual(len(service.containers()), 3)
+        self.assertEqual(len(service.containers()), 1)
         service.scale(1)
         self.assertEqual(len(service.containers()), 1)
-        project.up()
+        project.up(scale_override={'web': 3})
         service = project.get_service('web')
-        self.assertEqual(len(service.containers()), 1)
+        self.assertEqual(len(service.containers()), 3)
         # does scale=0 ,makes any sense? after recreating at least 1 container is running
         service.scale(0)
         project.up()
@@ -681,6 +682,41 @@ class ProjectTest(DockerClientTestCase):
             }],
         }
 
+    @v2_only()
+    def test_up_with_ipam_options(self):
+        config_data = build_config(
+            version=V2_0,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'networks': {'front': None},
+            }],
+            networks={
+                'front': {
+                    'driver': 'bridge',
+                    'ipam': {
+                        'driver': 'default',
+                        'options': {
+                            "com.docker.compose.network.test": "9-29-045"
+                        }
+                    },
+                },
+            },
+        )
+
+        project = Project.from_config(
+            client=self.client,
+            name='composetest',
+            config_data=config_data,
+        )
+        project.up()
+
+        network = self.client.networks(names=['composetest_front'])[0]
+
+        assert network['IPAM']['Options'] == {
+            "com.docker.compose.network.test": "9-29-045"
+        }
+
     @v2_only()
     def test_up_with_network_static_addresses(self):
         config_data = build_config(
@@ -1102,6 +1138,33 @@ class ProjectTest(DockerClientTestCase):
         containers = project.containers()
         self.assertEqual(len(containers), 1)
 
+    def test_project_up_config_scale(self):
+        config_data = build_config(
+            version=V2_2,
+            services=[{
+                'name': 'web',
+                'image': 'busybox:latest',
+                'command': 'top',
+                'scale': 3
+            }]
+        )
+
+        project = Project.from_config(
+            name='composetest', config_data=config_data, client=self.client
+        )
+        project.up()
+        assert len(project.containers()) == 3
+
+        project.up(scale_override={'web': 2})
+        assert len(project.containers()) == 2
+
+        project.up(scale_override={'web': 4})
+        assert len(project.containers()) == 4
+
+        project.stop()
+        project.up()
+        assert len(project.containers()) == 3
+
     @v2_only()
     def test_initialize_volumes(self):
         vol_name = '{0:x}'.format(random.getrandbits(32))

+ 26 - 8
tests/integration/service_test.py

@@ -4,6 +4,7 @@ from __future__ import unicode_literals
 import os
 import shutil
 import tempfile
+from distutils.spawn import find_executable
 from os import path
 
 import pytest
@@ -25,6 +26,7 @@ from compose.const import LABEL_PROJECT
 from compose.const import LABEL_SERVICE
 from compose.const import LABEL_VERSION
 from compose.container import Container
+from compose.errors import OperationFailedError
 from compose.project import OneOffFilter
 from compose.service import ConvergencePlan
 from compose.service import ConvergenceStrategy
@@ -115,6 +117,21 @@ class ServiceTest(DockerClientTestCase):
         service.start_container(container)
         self.assertEqual(container.get('HostConfig.ShmSize'), 67108864)
 
+    def test_create_container_with_init_bool(self):
+        self.require_api_version('1.25')
+        service = self.create_service('db', init=True)
+        container = service.create_container()
+        service.start_container(container)
+        assert container.get('HostConfig.Init') is True
+
+    def test_create_container_with_init_path(self):
+        self.require_api_version('1.25')
+        docker_init_path = find_executable('docker-init')
+        service = self.create_service('db', init=docker_init_path)
+        container = service.create_container()
+        service.start_container(container)
+        assert container.get('HostConfig.InitPath') == docker_init_path
+
     @pytest.mark.xfail(True, reason='Some kernels/configs do not support pids_limit')
     def test_create_container_with_pids_limit(self):
         self.require_api_version('1.23')
@@ -761,15 +778,15 @@ class ServiceTest(DockerClientTestCase):
                 message="testing",
                 response={},
                 explanation="Boom")):
-
             with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
-                service.scale(3)
+                with pytest.raises(OperationFailedError):
+                    service.scale(3)
 
-        self.assertEqual(len(service.containers()), 1)
-        self.assertTrue(service.containers()[0].is_running)
-        self.assertIn(
-            "ERROR: for composetest_web_2  Cannot create container for service web: Boom",
-            mock_stderr.getvalue()
+        assert len(service.containers()) == 1
+        assert service.containers()[0].is_running
+        assert (
+            "ERROR: for composetest_web_2  Cannot create container for service"
+            " web: Boom" in mock_stderr.getvalue()
         )
 
     def test_scale_with_unexpected_exception(self):
@@ -821,7 +838,8 @@ class ServiceTest(DockerClientTestCase):
         service = self.create_service('app', container_name='custom-container')
         self.assertEqual(service.custom_container_name, 'custom-container')
 
-        service.scale(3)
+        with pytest.raises(OperationFailedError):
+            service.scale(3)
 
         captured_output = mock_log.warn.call_args[0][0]
 

+ 3 - 3
tests/integration/testcases.py

@@ -15,7 +15,7 @@ from compose.const import API_VERSIONS
 from compose.const import COMPOSEFILE_V1 as V1
 from compose.const import COMPOSEFILE_V2_0 as V2_0
 from compose.const import COMPOSEFILE_V2_0 as V2_1
-from compose.const import COMPOSEFILE_V3_0 as V3_0
+from compose.const import COMPOSEFILE_V3_2 as V3_2
 from compose.const import LABEL_PROJECT
 from compose.progress_stream import stream_output
 from compose.service import Service
@@ -37,7 +37,7 @@ def get_links(container):
 
 def engine_max_version():
     if 'DOCKER_VERSION' not in os.environ:
-        return V3_0
+        return V3_2
     version = os.environ['DOCKER_VERSION'].partition('-')[0]
     if version_lt(version, '1.10'):
         return V1
@@ -45,7 +45,7 @@ def engine_max_version():
         return V2_0
     if version_lt(version, '1.13'):
         return V2_1
-    return V3_0
+    return V3_2
 
 
 def build_version_required_decorator(ignored_versions):

+ 19 - 0
tests/unit/cli/command_test.py

@@ -1,9 +1,11 @@
+# ~*~ encoding: utf-8 ~*~
 from __future__ import absolute_import
 from __future__ import unicode_literals
 
 import os
 
 import pytest
+import six
 
 from compose.cli.command import get_config_path_from_options
 from compose.config.environment import Environment
@@ -55,3 +57,20 @@ class TestGetConfigPathFromOptions(object):
     def test_no_path(self):
         environment = Environment.from_env_file('.')
         assert not get_config_path_from_options('.', {}, environment)
+
+    def test_unicode_path_from_options(self):
+        paths = [b'\xe5\xb0\xb1\xe5\x90\x83\xe9\xa5\xad/docker-compose.yml']
+        opts = {'--file': paths}
+        environment = Environment.from_env_file('.')
+        assert get_config_path_from_options(
+            '.', opts, environment
+        ) == ['就吃饭/docker-compose.yml']
+
+    @pytest.mark.skipif(six.PY3, reason='Env values in Python 3 are already Unicode')
+    def test_unicode_path_from_env(self):
+        with mock.patch.dict(os.environ):
+            os.environ['COMPOSE_FILE'] = b'\xe5\xb0\xb1\xe5\x90\x83\xe9\xa5\xad/docker-compose.yml'
+            environment = Environment.from_env_file('.')
+            assert get_config_path_from_options(
+                '.', {}, environment
+            ) == ['就吃饭/docker-compose.yml']

+ 12 - 0
tests/unit/config/config_test.py

@@ -3837,3 +3837,15 @@ class SerializeTest(unittest.TestCase):
         serialized_service = serialized_config['services']['web']
         assert secret_sort(serialized_service['secrets']) == secret_sort(service_dict['secrets'])
         assert 'secrets' in serialized_config
+
+    def test_serialize_ports(self):
+        config_dict = config.Config(version='2.0', services=[
+            {
+                'ports': [types.ServicePort('80', '8080', None, None, None)],
+                'image': 'alpine',
+                'name': 'web'
+            }
+        ], volumes={}, networks={}, secrets={})
+
+        serialized_config = yaml.load(serialize_config(config_dict))
+        assert '8080:80/tcp' in serialized_config['services']['web']['ports']

+ 10 - 0
tests/unit/config/types_test.py

@@ -71,6 +71,16 @@ class TestServicePort(object):
         }
         assert ports[0].legacy_repr() == port_def
 
+    def test_parse_ext_ip_no_published_port(self):
+        port_def = '1.1.1.1::3000'
+        ports = ServicePort.parse(port_def)
+        assert len(ports) == 1
+        assert ports[0].legacy_repr() == port_def + '/tcp'
+        assert ports[0].repr() == {
+            'target': '3000',
+            'external_ip': '1.1.1.1',
+        }
+
     def test_parse_port_range(self):
         ports = ServicePort.parse('25000-25001:4000-4001')
         assert len(ports) == 2