70370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082 | from __future__ import absolute_importfrom __future__ import unicode_literalsimport loggingimport osimport reimport sysfrom collections import namedtuplefrom operator import attrgetterimport enumimport sixfrom docker.errors import APIErrorfrom docker.utils import LogConfigfrom docker.utils.ports import build_port_bindingsfrom docker.utils.ports import split_portfrom . 
import __version__from .config import DOCKER_CONFIG_KEYSfrom .config import merge_environmentfrom .config.validation import VALID_NAME_CHARSfrom .const import DEFAULT_TIMEOUTfrom .const import IS_WINDOWS_PLATFORMfrom .const import LABEL_CONFIG_HASHfrom .const import LABEL_CONTAINER_NUMBERfrom .const import LABEL_ONE_OFFfrom .const import LABEL_PROJECTfrom .const import LABEL_SERVICEfrom .const import LABEL_VERSIONfrom .container import Containerfrom .legacy import check_for_legacy_containersfrom .progress_stream import stream_outputfrom .progress_stream import StreamOutputErrorfrom .utils import json_hashfrom .utils import parallel_executelog = logging.getLogger(__name__)DOCKER_START_KEYS = [    'cap_add',    'cap_drop',    'cgroup_parent',    'devices',    'dns',    'dns_search',    'env_file',    'extra_hosts',    'ipc',    'read_only',    'net',    'log_driver',    'log_opt',    'mem_limit',    'memswap_limit',    'pid',    'privileged',    'restart',    'volumes_from',    'security_opt',]class BuildError(Exception):    def __init__(self, service, reason):        self.service = service        self.reason = reasonclass ConfigError(ValueError):    passclass NeedsBuildError(Exception):    def __init__(self, service):        self.service = serviceclass NoSuchImageError(Exception):    passVolumeSpec = namedtuple('VolumeSpec', 'external internal mode')VolumeFromSpec = namedtuple('VolumeFromSpec', 'source mode')ServiceName = namedtuple('ServiceName', 'project service number')ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')@enum.uniqueclass ConvergenceStrategy(enum.Enum):    """Enumeration for all possible convergence strategies. Values refer to    when containers should be recreated.    
"""    changed = 1    always = 2    never = 3    @property    def allows_recreate(self):        return self is not type(self).neverclass Service(object):    def __init__(        self,        name,        client=None,        project='default',        use_networking=False,        links=None,        volumes_from=None,        net=None,        **options    ):        if not re.match('^%s+$' % VALID_NAME_CHARS, project):            raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))        self.name = name        self.client = client        self.project = project        self.use_networking = use_networking        self.links = links or []        self.volumes_from = volumes_from or []        self.net = net or Net(None)        self.options = options    def containers(self, stopped=False, one_off=False, filters={}):        filters.update({'label': self.labels(one_off=one_off)})        containers = list(filter(None, [            Container.from_ps(self.client, container)            for container in self.client.containers(                all=stopped,                filters=filters)]))        if not containers:            check_for_legacy_containers(                self.client,                self.project,                [self.name],            )        return containers    def get_container(self, number=1):        """Return a :class:`compose.container.Container` for this service. The        container must be active, and match `number`.        
"""        labels = self.labels() + ['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]        for container in self.client.containers(filters={'label': labels}):            return Container.from_ps(self.client, container)        raise ValueError("No container found for %s_%s" % (self.name, number))    def start(self, **options):        for c in self.containers(stopped=True):            self.start_container_if_stopped(c, **options)    # TODO: remove these functions, project takes care of starting/stopping,    def stop(self, **options):        for c in self.containers():            log.info("Stopping %s" % c.name)            c.stop(**options)    def pause(self, **options):        for c in self.containers(filters={'status': 'running'}):            log.info("Pausing %s" % c.name)            c.pause(**options)    def unpause(self, **options):        for c in self.containers(filters={'status': 'paused'}):            log.info("Unpausing %s" % c.name)            c.unpause()    def kill(self, **options):        for c in self.containers():            log.info("Killing %s" % c.name)            c.kill(**options)    def restart(self, **options):        for c in self.containers():            log.info("Restarting %s" % c.name)            c.restart(**options)    # end TODO    def scale(self, desired_num, timeout=DEFAULT_TIMEOUT):        """        Adjusts the number of containers to the specified number and ensures        they are running.        - creates containers until there are at least `desired_num`        - stops containers until there are at most `desired_num` running        - starts containers until there are at least `desired_num` running        - removes all stopped containers        """        if self.custom_container_name() and desired_num > 1:            log.warn('The "%s" service is using the custom container name "%s". '                     'Docker requires each container to have a unique name. '                     'Remove the custom name to scale the service.'    
                 % (self.name, self.custom_container_name()))        if self.specifies_host_port():            log.warn('The "%s" service specifies a port on the host. If multiple containers '                     'for this service are created on a single host, the port will clash.'                     % self.name)        def create_and_start(service, number):            container = service.create_container(number=number, quiet=True)            container.start()            return container        running_containers = self.containers(stopped=False)        num_running = len(running_containers)        if desired_num == num_running:            # do nothing as we already have the desired number            log.info('Desired container number already achieved')            return        if desired_num > num_running:            # we need to start/create until we have desired_num            all_containers = self.containers(stopped=True)            if num_running != len(all_containers):                # we have some stopped containers, let's start them up again                stopped_containers = sorted([c for c in all_containers if not c.is_running], key=attrgetter('number'))                num_stopped = len(stopped_containers)                if num_stopped + num_running > desired_num:                    num_to_start = desired_num - num_running                    containers_to_start = stopped_containers[:num_to_start]                else:                    containers_to_start = stopped_containers                parallel_execute(                    objects=containers_to_start,                    obj_callable=lambda c: c.start(),                    msg_index=lambda c: c.name,                    msg="Starting"                )                num_running += len(containers_to_start)            num_to_create = desired_num - num_running            next_number = self._next_container_number()            container_numbers = [                number for number in range(                   
 next_number, next_number + num_to_create                )            ]            parallel_execute(                objects=container_numbers,                obj_callable=lambda n: create_and_start(service=self, number=n),                msg_index=lambda n: n,                msg="Creating and starting"            )        if desired_num < num_running:            num_to_stop = num_running - desired_num            sorted_running_containers = sorted(running_containers, key=attrgetter('number'))            containers_to_stop = sorted_running_containers[-num_to_stop:]            parallel_execute(                objects=containers_to_stop,                obj_callable=lambda c: c.stop(timeout=timeout),                msg_index=lambda c: c.name,                msg="Stopping"            )        self.remove_stopped()    def remove_stopped(self, **options):        containers = [c for c in self.containers(stopped=True) if not c.is_running]        parallel_execute(            objects=containers,            obj_callable=lambda c: c.remove(**options),            msg_index=lambda c: c.name,            msg="Removing"        )    def create_container(self,                         one_off=False,                         do_build=True,                         previous_container=None,                         number=None,                         quiet=False,                         **override_options):        """        Create a container for this service. If the image doesn't exist, attempt to pull        it.        
"""        self.ensure_image_exists(do_build=do_build)        container_options = self._get_container_create_options(            override_options,            number or self._next_container_number(one_off=one_off),            one_off=one_off,            previous_container=previous_container,        )        if 'name' in container_options and not quiet:            log.info("Creating %s" % container_options['name'])        return Container.create(self.client, **container_options)    def ensure_image_exists(self, do_build=True):        try:            self.image()            return        except NoSuchImageError:            pass        if self.can_be_built():            if do_build:                self.build()            else:                raise NeedsBuildError(self)        else:            self.pull()    def image(self):        try:            return self.client.inspect_image(self.image_name)        except APIError as e:            if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):                raise NoSuchImageError("Image '{}' not found".format(self.image_name))            else:                raise    @property    def image_name(self):        if self.can_be_built():            return self.full_name        else:            return self.options['image']    def convergence_plan(self, strategy=ConvergenceStrategy.changed):        containers = self.containers(stopped=True)        if not containers:            return ConvergencePlan('create', [])        if strategy is ConvergenceStrategy.never:            return ConvergencePlan('start', containers)        if (            strategy is ConvergenceStrategy.always or            self._containers_have_diverged(containers)        ):            return ConvergencePlan('recreate', containers)        stopped = [c for c in containers if not c.is_running]        if stopped:            return ConvergencePlan('start', stopped)        return ConvergencePlan('noop', containers)    def 
_containers_have_diverged(self, containers):        config_hash = None        try:            config_hash = self.config_hash        except NoSuchImageError as e:            log.debug(                'Service %s has diverged: %s',                self.name, six.text_type(e),            )            return True        has_diverged = False        for c in containers:            container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)            if container_config_hash != config_hash:                log.debug(                    '%s has diverged: %s != %s',                    c.name, container_config_hash, config_hash,                )                has_diverged = True        return has_diverged    def execute_convergence_plan(self,                                 plan,                                 do_build=True,                                 timeout=DEFAULT_TIMEOUT):        (action, containers) = plan        if action == 'create':            container = self.create_container(do_build=do_build)            self.start_container(container)            return [container]        elif action == 'recreate':            return [                self.recreate_container(                    c,                    timeout=timeout                )                for c in containers            ]        elif action == 'start':            for c in containers:                self.start_container_if_stopped(c)            return containers        elif action == 'noop':            for c in containers:                log.info("%s is up-to-date" % c.name)            return containers        else:            raise Exception("Invalid action: {}".format(action))    def recreate_container(self,                           container,                           timeout=DEFAULT_TIMEOUT):        """Recreate a container.        The original container is renamed to a temporary name so that data        volumes can be copied to the new container, before the original        container is removed.     
   """        log.info("Recreating %s" % container.name)        try:            container.stop(timeout=timeout)        except APIError as e:            if (e.response.status_code == 500                    and e.explanation                    and 'no such process' in str(e.explanation)):                pass            else:                raise        # Use a hopefully unique container name by prepending the short id        self.client.rename(            container.id,            '%s_%s' % (container.short_id, container.name))        new_container = self.create_container(            do_build=False,            previous_container=container,            number=container.labels.get(LABEL_CONTAINER_NUMBER),            quiet=True,        )        self.start_container(new_container)        container.remove()        return new_container    def start_container_if_stopped(self, container):        if container.is_running:            return container        else:            log.info("Starting %s" % container.name)            return self.start_container(container)    def start_container(self, container):        container.start()        return container    def remove_duplicate_containers(self, timeout=DEFAULT_TIMEOUT):        for c in self.duplicate_containers():            log.info('Removing %s' % c.name)            c.stop(timeout=timeout)            c.remove()    def duplicate_containers(self):        containers = sorted(            self.containers(stopped=True),            key=lambda c: c.get('Created'),        )        numbers = set()        for c in containers:            if c.number in numbers:                yield c            else:                numbers.add(c.number)    @property    def config_hash(self):        return json_hash(self.config_dict())    def config_dict(self):        return {            'options': self.options,            'image_id': self.image()['Id'],            'links': self.get_link_names(),            'net': self.net.id,            'volumes_from': 
self.get_volumes_from_names(),        }    def get_dependency_names(self):        net_name = self.net.service_name        return (self.get_linked_service_names() +                self.get_volumes_from_names() +                ([net_name] if net_name else []))    def get_linked_service_names(self):        return [service.name for (service, _) in self.links]    def get_link_names(self):        return [(service.name, alias) for service, alias in self.links]    def get_volumes_from_names(self):        return [s.source.name for s in self.volumes_from if isinstance(s.source, Service)]    def get_container_name(self, number, one_off=False):        # TODO: Implement issue #652 here        return build_container_name(self.project, self.name, number, one_off)    # TODO: this would benefit from github.com/docker/docker/pull/11943    # to remove the need to inspect every container    def _next_container_number(self, one_off=False):        containers = filter(None, [            Container.from_ps(self.client, container)            for container in self.client.containers(                all=True,                filters={'label': self.labels(one_off=one_off)})        ])        numbers = [c.number for c in containers]        return 1 if not numbers else max(numbers) + 1    def _get_links(self, link_to_self):        links = []        for service, link_name in self.links:            for container in service.containers():                links.append((container.name, link_name or service.name))                links.append((container.name, container.name))                links.append((container.name, container.name_without_project))        if link_to_self:            for container in self.containers():                links.append((container.name, self.name))                links.append((container.name, container.name))                links.append((container.name, container.name_without_project))        for external_link in self.options.get('external_links') or []:            if ':' not 
in external_link:                link_name = external_link            else:                external_link, link_name = external_link.split(':')            links.append((external_link, link_name))        return links    def _get_volumes_from(self):        volumes_from = []        for volume_from_spec in self.volumes_from:            volumes = build_volume_from(volume_from_spec)            volumes_from.extend(volumes)        return volumes_from    def _get_container_create_options(            self,            override_options,            number,            one_off=False,            previous_container=None):        add_config_hash = (not one_off and not override_options)        container_options = dict(            (k, self.options[k])            for k in DOCKER_CONFIG_KEYS if k in self.options)        container_options.update(override_options)        if self.custom_container_name() and not one_off:            container_options['name'] = self.custom_container_name()        elif not container_options.get('name'):            container_options['name'] = self.get_container_name(number, one_off)        if 'detach' not in container_options:            container_options['detach'] = True        # If a qualified hostname was given, split it into an        # unqualified hostname and a domainname unless domainname        # was also given explicitly. This matches the behavior of        # the official Docker CLI in that scenario.        if ('hostname' in container_options                and 'domainname' not in container_options                and '.' 
in container_options['hostname']):            parts = container_options['hostname'].partition('.')            container_options['hostname'] = parts[0]            container_options['domainname'] = parts[2]        if 'hostname' not in container_options and self.use_networking:            container_options['hostname'] = self.name        if 'ports' in container_options or 'expose' in self.options:            ports = []            all_ports = container_options.get('ports', []) + self.options.get('expose', [])            for port_range in all_ports:                internal_range, _ = split_port(port_range)                for port in internal_range:                    port = str(port)                    if '/' in port:                        port = tuple(port.split('/'))                    ports.append(port)            container_options['ports'] = ports        override_options['binds'] = merge_volume_bindings(            container_options.get('volumes') or [],            previous_container)        if 'volumes' in container_options:            container_options['volumes'] = dict(                (parse_volume_spec(v).internal, {})                for v in container_options['volumes'])        container_options['environment'] = merge_environment(            self.options.get('environment'),            override_options.get('environment'))        if previous_container:            container_options['environment']['affinity:container'] = ('=' + previous_container.id)        container_options['image'] = self.image_name        container_options['labels'] = build_container_labels(            container_options.get('labels', {}),            self.labels(one_off=one_off),            number,            self.config_hash if add_config_hash else None)        # Delete options which are only used when starting        for key in DOCKER_START_KEYS:            container_options.pop(key, None)        container_options['host_config'] = self._get_container_host_config(            override_options,    
        one_off=one_off)        return container_options    def _get_container_host_config(self, override_options, one_off=False):        options = dict(self.options, **override_options)        port_bindings = build_port_bindings(options.get('ports') or [])        privileged = options.get('privileged', False)        cap_add = options.get('cap_add', None)        cap_drop = options.get('cap_drop', None)        log_config = LogConfig(            type=options.get('log_driver', ""),            config=options.get('log_opt', None)        )        pid = options.get('pid', None)        security_opt = options.get('security_opt', None)        dns = options.get('dns', None)        if isinstance(dns, six.string_types):            dns = [dns]        dns_search = options.get('dns_search', None)        if isinstance(dns_search, six.string_types):            dns_search = [dns_search]        restart = parse_restart_spec(options.get('restart', None))        extra_hosts = build_extra_hosts(options.get('extra_hosts', None))        read_only = options.get('read_only', None)        devices = options.get('devices', None)        cgroup_parent = options.get('cgroup_parent', None)        return self.client.create_host_config(            links=self._get_links(link_to_self=one_off),            port_bindings=port_bindings,            binds=options.get('binds'),            volumes_from=self._get_volumes_from(),            privileged=privileged,            network_mode=self.net.mode,            devices=devices,            dns=dns,            dns_search=dns_search,            restart_policy=restart,            cap_add=cap_add,            cap_drop=cap_drop,            mem_limit=options.get('mem_limit'),            memswap_limit=options.get('memswap_limit'),            log_config=log_config,            extra_hosts=extra_hosts,            read_only=read_only,            pid_mode=pid,            security_opt=security_opt,            ipc_mode=options.get('ipc'),            cgroup_parent=cgroup_parent   
     )    def build(self, no_cache=False, pull=False):        log.info('Building %s' % self.name)        path = self.options['build']        # python2 os.path() doesn't support unicode, so we need to encode it to        # a byte string        if not six.PY3:            path = path.encode('utf8')        build_output = self.client.build(            path=path,            tag=self.image_name,            stream=True,            rm=True,            pull=pull,            nocache=no_cache,            dockerfile=self.options.get('dockerfile', None),        )        try:            all_events = stream_output(build_output, sys.stdout)        except StreamOutputError as e:            raise BuildError(self, six.text_type(e))        # Ensure the HTTP connection is not reused for another        # streaming command, as the Docker daemon can sometimes        # complain about it        self.client.close()        image_id = None        for event in all_events:            if 'stream' in event:                match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))                if match:                    image_id = match.group(1)        if image_id is None:            raise BuildError(self, event if all_events else 'Unknown')        return image_id    def can_be_built(self):        return 'build' in self.options    @property    def full_name(self):        """        The tag to give to images built for this service.        
"""        return '%s_%s' % (self.project, self.name)    def labels(self, one_off=False):        return [            '{0}={1}'.format(LABEL_PROJECT, self.project),            '{0}={1}'.format(LABEL_SERVICE, self.name),            '{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False")        ]    def custom_container_name(self):        return self.options.get('container_name')    def specifies_host_port(self):        for port in self.options.get('ports', []):            if ':' in str(port):                return True        return False    def pull(self, ignore_pull_failures=False):        if 'image' not in self.options:            return        repo, tag, separator = parse_repository_tag(self.options['image'])        tag = tag or 'latest'        log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))        output = self.client.pull(            repo,            tag=tag,            stream=True,        )        try:            stream_output(output, sys.stdout)        except StreamOutputError as e:            if not ignore_pull_failures:                raise            else:                log.error(six.text_type(e))class Net(object):    """A `standard` network mode (ex: host, bridge)"""    service_name = None    def __init__(self, net):        self.net = net    @property    def id(self):        return self.net    mode = idclass ContainerNet(object):    """A network mode that uses a container's network stack."""    service_name = None    def __init__(self, container):        self.container = container    @property    def id(self):        return self.container.id    @property    def mode(self):        return 'container:' + self.container.idclass ServiceNet(object):    """A network mode that uses a service's network stack."""    def __init__(self, service):        self.service = service    @property    def id(self):        return self.service.name    service_name = id    @property    def mode(self):        containers = self.service.containers()   
     if containers:            return 'container:' + containers[0].id        log.warn("Warning: Service %s is trying to use reuse the network stack "                 "of another service that is not running." % (self.id))        return None# Namesdef build_container_name(project, service, number, one_off=False):    bits = [project, service]    if one_off:        bits.append('run')    return '_'.join(bits + [str(number)])# Imagesdef parse_repository_tag(repo_path):    """Splits image identification into base image path, tag/digest    and it's separator.    Example:    >>> parse_repository_tag('user/repo@sha256:digest')    ('user/repo', 'sha256:digest', '@')    >>> parse_repository_tag('user/repo:v1')    ('user/repo', 'v1', ':')    """    tag_separator = ":"    digest_separator = "@"    if digest_separator in repo_path:        repo, tag = repo_path.rsplit(digest_separator, 1)        return repo, tag, digest_separator    repo, tag = repo_path, ""    if tag_separator in repo_path:        repo, tag = repo_path.rsplit(tag_separator, 1)        if "/" in tag:            repo, tag = repo_path, ""    return repo, tag, tag_separator# Volumesdef merge_volume_bindings(volumes_option, previous_container):    """Return a list of volume bindings for a container. Container data volumes    are replaced by those from the previous container.    """    volume_bindings = dict(        build_volume_binding(parse_volume_spec(volume))        for volume in volumes_option or []        if ':' in volume)    if previous_container:        volume_bindings.update(            get_container_data_volumes(previous_container, volumes_option))    return list(volume_bindings.values())def get_container_data_volumes(container, volumes_option):    """Find the container data volumes that are in `volumes_option`, and return    a mapping of volume bindings for those volumes.    
"""    volumes = []    volumes_option = volumes_option or []    container_volumes = container.get('Volumes') or {}    image_volumes = container.image_config['ContainerConfig'].get('Volumes') or {}    for volume in set(volumes_option + list(image_volumes)):        volume = parse_volume_spec(volume)        # No need to preserve host volumes        if volume.external:            continue        volume_path = container_volumes.get(volume.internal)        # New volume, doesn't exist in the old container        if not volume_path:            continue        # Copy existing volume from old container        volume = volume._replace(external=volume_path)        volumes.append(build_volume_binding(volume))    return dict(volumes)def build_volume_binding(volume_spec):    return volume_spec.internal, "{}:{}:{}".format(*volume_spec)def normalize_paths_for_engine(external_path, internal_path):    """    Windows paths, c:\my\path\shiny, need to be changed to be compatible with    the Engine. Volume paths are expected to be linux style /c/my/path/shiny/    """    if IS_WINDOWS_PLATFORM:        if external_path:            drive, tail = os.path.splitdrive(external_path)            if drive:                reformatted_drive = "/{}".format(drive.replace(":", ""))                external_path = reformatted_drive + tail            external_path = "/".join(external_path.split("\\"))        return external_path, "/".join(internal_path.split("\\"))    else:        return external_path, internal_pathdef parse_volume_spec(volume_config):    """    Parse a volume_config path and split it into external:internal[:mode]    parts to be returned as a valid VolumeSpec.    
"""    if IS_WINDOWS_PLATFORM:        # relative paths in windows expand to include the drive, eg C:\        # so we join the first 2 parts back together to count as one        drive, tail = os.path.splitdrive(volume_config)        parts = tail.split(":")        if drive:            parts[0] = drive + parts[0]    else:        parts = volume_config.split(':')    if len(parts) > 3:        raise ConfigError("Volume %s has incorrect format, should be "                          "external:internal[:mode]" % volume_config)    if len(parts) == 1:        external, internal = normalize_paths_for_engine(None, os.path.normpath(parts[0]))    else:        external, internal = normalize_paths_for_engine(os.path.normpath(parts[0]), os.path.normpath(parts[1]))    mode = 'rw'    if len(parts) == 3:        mode = parts[2]    return VolumeSpec(external, internal, mode)def build_volume_from(volume_from_spec):    """    volume_from can be either a service or a container. We want to return the    container.id and format it into a string complete with the mode.    
"""    if isinstance(volume_from_spec.source, Service):        containers = volume_from_spec.source.containers(stopped=True)        if not containers:            return ["{}:{}".format(volume_from_spec.source.create_container().id, volume_from_spec.mode)]        container = containers[0]        return ["{}:{}".format(container.id, volume_from_spec.mode)]    elif isinstance(volume_from_spec.source, Container):        return ["{}:{}".format(volume_from_spec.source.id, volume_from_spec.mode)]def parse_volume_from_spec(volume_from_config):    parts = volume_from_config.split(':')    if len(parts) > 2:        raise ConfigError("Volume %s has incorrect format, should be "                          "external:internal[:mode]" % volume_from_config)    if len(parts) == 1:        source = parts[0]        mode = 'rw'    else:        source, mode = parts    return VolumeFromSpec(source, mode)# Labelsdef build_container_labels(label_options, service_labels, number, config_hash):    labels = dict(label_options or {})    labels.update(label.split('=', 1) for label in service_labels)    labels[LABEL_CONTAINER_NUMBER] = str(number)    labels[LABEL_VERSION] = __version__    if config_hash:        log.debug("Added config hash: %s" % config_hash)        labels[LABEL_CONFIG_HASH] = config_hash    return labels# Restart policydef parse_restart_spec(restart_config):    if not restart_config:        return None    parts = restart_config.split(':')    if len(parts) > 2:        raise ConfigError("Restart %s has incorrect format, should be "                          "mode[:max_retry]" % restart_config)    if len(parts) == 2:        name, max_retry_count = parts    else:        name, = parts        max_retry_count = 0    return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}# Extra hostsdef build_extra_hosts(extra_hosts_config):    if not extra_hosts_config:        return {}    if isinstance(extra_hosts_config, list):        extra_hosts_dict = {}        for extra_hosts_line in 
extra_hosts_config:            if not isinstance(extra_hosts_line, six.string_types):                raise ConfigError(                    "extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %                    extra_hosts_config                )            host, ip = extra_hosts_line.split(':')            extra_hosts_dict.update({host.strip(): ip.strip()})        extra_hosts_config = extra_hosts_dict    if isinstance(extra_hosts_config, dict):        return extra_hosts_config    raise ConfigError(        "extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %        extra_hosts_config    )
 |