service.py

from __future__ import unicode_literals
from __future__ import absolute_import
from collections import namedtuple
import logging
import re
import sys
from operator import attrgetter

import six
from docker.errors import APIError
from docker.utils import create_host_config, LogConfig

from . import __version__
from .config import DOCKER_CONFIG_KEYS, merge_environment
from .const import (
    LABEL_CONTAINER_NUMBER,
    LABEL_ONE_OFF,
    LABEL_PROJECT,
    LABEL_SERVICE,
    LABEL_VERSION,
    LABEL_CONFIG_HASH,
)
from .container import Container, get_container_name
from .progress_stream import stream_output, StreamOutputError
from .utils import json_hash

log = logging.getLogger(__name__)


DOCKER_START_KEYS = [
    'cap_add',
    'cap_drop',
    'devices',
    'dns',
    'dns_search',
    'env_file',
    'extra_hosts',
    'read_only',
    'net',
    'log_driver',
    'pid',
    'privileged',
    'restart',
    'volumes_from',
]

VALID_NAME_CHARS = '[a-zA-Z0-9]'


class BuildError(Exception):
    def __init__(self, service, reason):
        self.service = service
        self.reason = reason


class CannotBeScaledError(Exception):
    pass


class ConfigError(ValueError):
    pass


class NeedsBuildError(Exception):
    def __init__(self, service):
        self.service = service


VolumeSpec = namedtuple('VolumeSpec', 'external internal mode')

ServiceName = namedtuple('ServiceName', 'project service number')

ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')


class Service(object):
    def __init__(self, name, client=None, project='default', links=None,
                 external_links=None, volumes_from=None, net=None, **options):
        if not re.match('^%s+$' % VALID_NAME_CHARS, name):
            raise ConfigError('Invalid service name "%s" - only %s are allowed' % (name, VALID_NAME_CHARS))
        if not re.match('^%s+$' % VALID_NAME_CHARS, project):
            raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))
        if 'image' in options and 'build' in options:
            raise ConfigError('Service %s has both an image and build path specified. A service can either be built to image or use an existing image, not both.' % name)
        if 'image' not in options and 'build' not in options:
            raise ConfigError('Service %s has neither an image nor a build path specified. Exactly one must be provided.' % name)

        self.name = name
        self.client = client
        self.project = project
        self.links = links or []
        self.external_links = external_links or []
        self.volumes_from = volumes_from or []
        self.net = net or None
        self.options = options

    def containers(self, stopped=False, one_off=False):
        containers = [
            Container.from_ps(self.client, container)
            for container in self.client.containers(
                all=stopped,
                filters={'label': self.labels(one_off=one_off)})]

        if not containers:
            check_for_legacy_containers(
                self.client,
                self.project,
                [self.name],
                stopped=stopped,
                one_off=one_off)

        return containers

    def get_container(self, number=1):
        """Return a :class:`compose.container.Container` for this service. The
        container must be active, and match `number`.
        """
        labels = self.labels() + ['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]
        for container in self.client.containers(filters={'label': labels}):
            return Container.from_ps(self.client, container)

        raise ValueError("No container found for %s_%s" % (self.name, number))

    def start(self, **options):
        for c in self.containers(stopped=True):
            self.start_container_if_stopped(c, **options)

    def stop(self, **options):
        for c in self.containers():
            log.info("Stopping %s..." % c.name)
            c.stop(**options)

    def kill(self, **options):
        for c in self.containers():
            log.info("Killing %s..." % c.name)
            c.kill(**options)

    def restart(self, **options):
        for c in self.containers():
            log.info("Restarting %s..." % c.name)
            c.restart(**options)

    def scale(self, desired_num):
        """
        Adjusts the number of containers to the specified number and ensures
        they are running.

        - creates containers until there are at least `desired_num`
        - stops containers until there are at most `desired_num` running
        - starts containers until there are at least `desired_num` running
        - removes all stopped containers
        """
        if not self.can_be_scaled():
            raise CannotBeScaledError()

        # Create enough containers
        containers = self.containers(stopped=True)
        while len(containers) < desired_num:
            containers.append(self.create_container())

        running_containers = []
        stopped_containers = []
        for c in containers:
            if c.is_running:
                running_containers.append(c)
            else:
                stopped_containers.append(c)
        running_containers.sort(key=lambda c: c.number)
        stopped_containers.sort(key=lambda c: c.number)

        # Stop containers
        while len(running_containers) > desired_num:
            c = running_containers.pop()
            log.info("Stopping %s..." % c.name)
            c.stop(timeout=1)
            stopped_containers.append(c)

        # Start containers
        while len(running_containers) < desired_num:
            c = stopped_containers.pop(0)
            log.info("Starting %s..." % c.name)
            self.start_container(c)
            running_containers.append(c)

        self.remove_stopped()

    def remove_stopped(self, **options):
        for c in self.containers(stopped=True):
            if not c.is_running:
                log.info("Removing %s..." % c.name)
                c.remove(**options)

    def create_container(self,
                         one_off=False,
                         insecure_registry=False,
                         do_build=True,
                         previous_container=None,
                         number=None,
                         **override_options):
        """
        Create a container for this service. If the image doesn't exist, attempt to pull
        it.
        """
        self.ensure_image_exists(
            do_build=do_build,
            insecure_registry=insecure_registry,
        )

        container_options = self._get_container_create_options(
            override_options,
            number or self._next_container_number(one_off=one_off),
            one_off=one_off,
            previous_container=previous_container,
        )
        return Container.create(self.client, **container_options)

    def ensure_image_exists(self,
                            do_build=True,
                            insecure_registry=False):
        if self.image():
            return

        if self.can_be_built():
            if do_build:
                self.build()
            else:
                raise NeedsBuildError(self)
        else:
            self.pull(insecure_registry=insecure_registry)

    def image(self):
        try:
            return self.client.inspect_image(self.image_name)
        except APIError as e:
            if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
                return None
            else:
                raise

    @property
    def image_name(self):
        if self.can_be_built():
            return self.full_name
        else:
            return self.options['image']

    def converge(self,
                 allow_recreate=True,
                 smart_recreate=False,
                 insecure_registry=False,
                 do_build=True):
        """
        If a container for this service doesn't exist, create and start one. If there are
        any, stop them, create+start new ones, and remove the old containers.
        """
        plan = self.convergence_plan(
            allow_recreate=allow_recreate,
            smart_recreate=smart_recreate,
        )

        return self.execute_convergence_plan(
            plan,
            insecure_registry=insecure_registry,
            do_build=do_build,
        )

    def convergence_plan(self,
                         allow_recreate=True,
                         smart_recreate=False):
        containers = self.containers(stopped=True)

        if not containers:
            return ConvergencePlan('create', [])

        if smart_recreate and not self._containers_have_diverged(containers):
            stopped = [c for c in containers if not c.is_running]

            if stopped:
                return ConvergencePlan('start', stopped)

            return ConvergencePlan('noop', containers)

        if not allow_recreate:
            return ConvergencePlan('start', containers)

        return ConvergencePlan('recreate', containers)
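
    # For reference, the four plan actions produced above and consumed by
    # execute_convergence_plan() below are:
    #   'create'   - no containers exist yet; create and start one
    #   'recreate' - config has diverged (or smart_recreate is off); recreate all containers
    #   'start'    - reuse the existing containers, starting any that are stopped
    #   'noop'     - every container is already up to date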

    def recreate_plan(self):
        containers = self.containers(stopped=True)
        return ConvergencePlan('recreate', containers)

    def _containers_have_diverged(self, containers):
        config_hash = self.config_hash()
        has_diverged = False

        for c in containers:
            container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)
            if container_config_hash != config_hash:
                log.debug(
                    '%s has diverged: %s != %s',
                    c.name, container_config_hash, config_hash,
                )
                has_diverged = True

        return has_diverged

    def execute_convergence_plan(self,
                                 plan,
                                 insecure_registry=False,
                                 do_build=True):
        (action, containers) = plan

        if action == 'create':
            container = self.create_container(
                insecure_registry=insecure_registry,
                do_build=do_build,
            )
            self.start_container(container)

            return [container]

        elif action == 'recreate':
            return [
                self.recreate_container(
                    c,
                    insecure_registry=insecure_registry,
                )
                for c in containers
            ]

        elif action == 'start':
            for c in containers:
                self.start_container_if_stopped(c)

            return containers

        elif action == 'noop':
            for c in containers:
                log.info("%s is up-to-date" % c.name)

            return containers

        else:
            raise Exception("Invalid action: {}".format(action))

    def recreate_container(self,
                           container,
                           insecure_registry=False):
        """Recreate a container.

        The original container is renamed to a temporary name so that data
        volumes can be copied to the new container, before the original
        container is removed.
        """
        log.info("Recreating %s..." % container.name)
        try:
            container.stop()
        except APIError as e:
            if (e.response.status_code == 500
                    and e.explanation
                    and 'no such process' in str(e.explanation)):
                pass
            else:
                raise

        # Use a hopefully unique container name by prepending the short id
        self.client.rename(
            container.id,
            '%s_%s' % (container.short_id, container.name))

        new_container = self.create_container(
            insecure_registry=insecure_registry,
            do_build=False,
            previous_container=container,
            number=container.labels.get(LABEL_CONTAINER_NUMBER),
        )
        self.start_container(new_container)
        container.remove()
        return new_container

    def start_container_if_stopped(self, container):
        if container.is_running:
            return container
        else:
            log.info("Starting %s..." % container.name)
            return self.start_container(container)

    def start_container(self, container):
        container.start()
        return container

    def start_or_create_containers(
            self,
            insecure_registry=False,
            do_build=True):
        containers = self.containers(stopped=True)

        if not containers:
            new_container = self.create_container(
                insecure_registry=insecure_registry,
                do_build=do_build,
            )
            return [self.start_container(new_container)]
        else:
            return [self.start_container_if_stopped(c) for c in containers]

    def config_hash(self):
        return json_hash(self.config_dict())

    def config_dict(self):
        return {
            'options': self.options,
            'image_id': self.image()['Id'],
        }

    def get_dependency_names(self):
        net_name = self.get_net_name()
        return (self.get_linked_names() +
                self.get_volumes_from_names() +
                ([net_name] if net_name else []))

    def get_linked_names(self):
        return [s.name for (s, _) in self.links]

    def get_volumes_from_names(self):
        return [s.name for s in self.volumes_from if isinstance(s, Service)]

    def get_net_name(self):
        if isinstance(self.net, Service):
            return self.net.name
        else:
            return

    def get_container_name(self, number, one_off=False):
        # TODO: Implement issue #652 here
        return build_container_name(self.project, self.name, number, one_off)

    # TODO: this would benefit from github.com/docker/docker/pull/11943
    # to remove the need to inspect every container
    def _next_container_number(self, one_off=False):
        numbers = [
            Container.from_ps(self.client, container).number
            for container in self.client.containers(
                all=True,
                filters={'label': self.labels(one_off=one_off)})
        ]
        return 1 if not numbers else max(numbers) + 1

    def _get_links(self, link_to_self):
        links = []
        for service, link_name in self.links:
            for container in service.containers():
                links.append((container.name, link_name or service.name))
                links.append((container.name, container.name))
                links.append((container.name, container.name_without_project))
        if link_to_self:
            for container in self.containers():
                links.append((container.name, self.name))
                links.append((container.name, container.name))
                links.append((container.name, container.name_without_project))
        for external_link in self.external_links:
            if ':' not in external_link:
                link_name = external_link
            else:
                external_link, link_name = external_link.split(':')
            links.append((external_link, link_name))
        return links

    def _get_volumes_from(self):
        volumes_from = []
        for volume_source in self.volumes_from:
            if isinstance(volume_source, Service):
                containers = volume_source.containers(stopped=True)
                if not containers:
                    volumes_from.append(volume_source.create_container().id)
                else:
                    volumes_from.extend(map(attrgetter('id'), containers))
            elif isinstance(volume_source, Container):
                volumes_from.append(volume_source.id)
        return volumes_from

    def _get_net(self):
        if not self.net:
            return "bridge"

        if isinstance(self.net, Service):
            containers = self.net.containers()
            if len(containers) > 0:
                net = 'container:' + containers[0].id
            else:
                log.warning("Warning: Service %s is trying to reuse the network stack "
                            "of another service that is not running." % (self.net.name))
                net = None
        elif isinstance(self.net, Container):
            net = 'container:' + self.net.id
        else:
            net = self.net

        return net

    def _get_container_create_options(
            self,
            override_options,
            number,
            one_off=False,
            previous_container=None):
        add_config_hash = (not one_off and not override_options)

        container_options = dict(
            (k, self.options[k])
            for k in DOCKER_CONFIG_KEYS if k in self.options)
        container_options.update(override_options)

        container_options['name'] = self.get_container_name(number, one_off)

        if add_config_hash:
            config_hash = self.config_hash()
            if 'labels' not in container_options:
                container_options['labels'] = {}
            container_options['labels'][LABEL_CONFIG_HASH] = config_hash
            log.debug("Added config hash: %s" % config_hash)

        if 'detach' not in container_options:
            container_options['detach'] = True

        # If a qualified hostname was given, split it into an
        # unqualified hostname and a domainname unless domainname
        # was also given explicitly. This matches the behavior of
        # the official Docker CLI in that scenario.
        if ('hostname' in container_options
                and 'domainname' not in container_options
                and '.' in container_options['hostname']):
            parts = container_options['hostname'].partition('.')
            container_options['hostname'] = parts[0]
            container_options['domainname'] = parts[2]

        if 'ports' in container_options or 'expose' in self.options:
            ports = []
            all_ports = container_options.get('ports', []) + self.options.get('expose', [])
            for port in all_ports:
                port = str(port)
                if ':' in port:
                    port = port.split(':')[-1]
                if '/' in port:
                    port = tuple(port.split('/'))
                ports.append(port)
            container_options['ports'] = ports

        override_options['binds'] = merge_volume_bindings(
            container_options.get('volumes') or [],
            previous_container)

        if 'volumes' in container_options:
            container_options['volumes'] = dict(
                (parse_volume_spec(v).internal, {})
                for v in container_options['volumes'])

        container_options['environment'] = merge_environment(
            self.options.get('environment'),
            override_options.get('environment'))

        if previous_container:
            container_options['environment']['affinity:container'] = ('=' + previous_container.id)

        container_options['image'] = self.image_name

        container_options['labels'] = build_container_labels(
            container_options.get('labels', {}),
            self.labels(one_off=one_off),
            number)

        # Delete options which are only used when starting
        for key in DOCKER_START_KEYS:
            container_options.pop(key, None)

        container_options['host_config'] = self._get_container_host_config(
            override_options,
            one_off=one_off)

        return container_options

    def _get_container_host_config(self, override_options, one_off=False):
        options = dict(self.options, **override_options)
        port_bindings = build_port_bindings(options.get('ports') or [])

        privileged = options.get('privileged', False)
        cap_add = options.get('cap_add', None)
        cap_drop = options.get('cap_drop', None)
        log_config = LogConfig(type=options.get('log_driver', 'json-file'))
        pid = options.get('pid', None)

        dns = options.get('dns', None)
        if isinstance(dns, six.string_types):
            dns = [dns]

        dns_search = options.get('dns_search', None)
        if isinstance(dns_search, six.string_types):
            dns_search = [dns_search]

        restart = parse_restart_spec(options.get('restart', None))

        extra_hosts = build_extra_hosts(options.get('extra_hosts', None))
        read_only = options.get('read_only', None)

        devices = options.get('devices', None)

        return create_host_config(
            links=self._get_links(link_to_self=one_off),
            port_bindings=port_bindings,
            binds=options.get('binds'),
            volumes_from=self._get_volumes_from(),
            privileged=privileged,
            network_mode=self._get_net(),
            devices=devices,
            dns=dns,
            dns_search=dns_search,
            restart_policy=restart,
            cap_add=cap_add,
            cap_drop=cap_drop,
            log_config=log_config,
            extra_hosts=extra_hosts,
            read_only=read_only,
            pid_mode=pid
        )

    def build(self, no_cache=False):
        log.info('Building %s...' % self.name)

        path = six.binary_type(self.options['build'])

        build_output = self.client.build(
            path=path,
            tag=self.image_name,
            stream=True,
            rm=True,
            nocache=no_cache,
            dockerfile=self.options.get('dockerfile', None),
        )

        try:
            all_events = stream_output(build_output, sys.stdout)
        except StreamOutputError as e:
            raise BuildError(self, six.text_type(e))

        # Ensure the HTTP connection is not reused for another
        # streaming command, as the Docker daemon can sometimes
        # complain about it
        self.client.close()

        image_id = None

        for event in all_events:
            if 'stream' in event:
                match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
                if match:
                    image_id = match.group(1)

        if image_id is None:
            raise BuildError(self, event if all_events else 'Unknown')

        return image_id

    def can_be_built(self):
        return 'build' in self.options

    @property
    def full_name(self):
        """
        The tag to give to images built for this service.
        """
        return '%s_%s' % (self.project, self.name)

    def labels(self, one_off=False):
        return [
            '{0}={1}'.format(LABEL_PROJECT, self.project),
            '{0}={1}'.format(LABEL_SERVICE, self.name),
            '{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False")
        ]

    def can_be_scaled(self):
        for port in self.options.get('ports', []):
            if ':' in str(port):
                return False
        return True

    def pull(self, insecure_registry=False):
        if 'image' not in self.options:
            return

        repo, tag = parse_repository_tag(self.options['image'])
        tag = tag or 'latest'
        log.info('Pulling %s (%s:%s)...' % (self.name, repo, tag))
        output = self.client.pull(
            repo,
            tag=tag,
            stream=True,
            insecure_registry=insecure_registry)
        stream_output(output, sys.stdout)


def get_container_data_volumes(container, volumes_option):
    """Find the container data volumes that are in `volumes_option`, and return
    a mapping of volume bindings for those volumes.
    """
    volumes = []

    volumes_option = volumes_option or []
    container_volumes = container.get('Volumes') or {}
    image_volumes = container.image_config['ContainerConfig'].get('Volumes') or {}

    for volume in set(volumes_option + list(image_volumes.keys())):
        volume = parse_volume_spec(volume)

        # No need to preserve host volumes
        if volume.external:
            continue

        volume_path = container_volumes.get(volume.internal)

        # New volume, doesn't exist in the old container
        if not volume_path:
            continue

        # Copy existing volume from old container
        volume = volume._replace(external=volume_path)
        volumes.append(build_volume_binding(volume))

    return dict(volumes)


def merge_volume_bindings(volumes_option, previous_container):
    """Return a dict of volume bindings for a container. Container data volumes
    are replaced by those from the previous container.
    """
    volume_bindings = dict(
        build_volume_binding(parse_volume_spec(volume))
        for volume in volumes_option or []
        if ':' in volume)

    if previous_container:
        volume_bindings.update(
            get_container_data_volumes(previous_container, volumes_option))

    return volume_bindings
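
# For reference (an illustrative call, paths are made up): with the parsing and
# binding helpers below, merge_volume_bindings(['/host:/container'], None)
# returns {'/host': {'bind': '/container', 'ro': False}}.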


def build_container_name(project, service, number, one_off=False):
    bits = [project, service]
    if one_off:
        bits.append('run')
    return '_'.join(bits + [str(number)])
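
# For reference (hypothetical project/service names): build_container_name('myproject', 'web', 1)
# gives 'myproject_web_1', and with one_off=True it gives 'myproject_web_run_1'.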


def build_container_labels(label_options, service_labels, number, one_off=False):
    labels = label_options or {}
    labels.update(label.split('=', 1) for label in service_labels)
    labels[LABEL_CONTAINER_NUMBER] = str(number)
    labels[LABEL_VERSION] = __version__
    return labels


def check_for_legacy_containers(
        client,
        project,
        services,
        stopped=False,
        one_off=False):
    """Check if there are containers named using the old naming convention
    and warn the user that those containers may need to be migrated to
    using labels, so that compose can find them.
    """
    for container in client.containers(all=stopped):
        name = get_container_name(container)
        for service in services:
            prefix = '%s_%s_%s' % (project, service, 'run_' if one_off else '')
            if not name.startswith(prefix):
                continue

            log.warn(
                "Compose found a container named %s without any "
                "labels. As of compose 1.3.0 containers are identified with "
                "labels instead of naming convention. If you'd like compose "
                "to use this container, please run "
                "`docker-compose migrate_to_labels`" % (name,))


def parse_restart_spec(restart_config):
    if not restart_config:
        return None
    parts = restart_config.split(':')
    if len(parts) > 2:
        raise ConfigError("Restart %s has incorrect format, should be "
                          "mode[:max_retry]" % restart_config)
    if len(parts) == 2:
        name, max_retry_count = parts
    else:
        name, = parts
        max_retry_count = 0

    return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
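
# For reference: parse_restart_spec('on-failure:5') returns
# {'Name': 'on-failure', 'MaximumRetryCount': 5}, and parse_restart_spec('always')
# returns {'Name': 'always', 'MaximumRetryCount': 0}.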


def parse_volume_spec(volume_config):
    parts = volume_config.split(':')
    if len(parts) > 3:
        raise ConfigError("Volume %s has incorrect format, should be "
                          "external:internal[:mode]" % volume_config)

    if len(parts) == 1:
        return VolumeSpec(None, parts[0], 'rw')

    if len(parts) == 2:
        parts.append('rw')

    external, internal, mode = parts
    if mode not in ('rw', 'ro'):
        raise ConfigError("Volume %s has invalid mode (%s), should be "
                          "one of: rw, ro." % (volume_config, mode))

    return VolumeSpec(external, internal, mode)
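
# For reference (illustrative paths): parse_volume_spec('/host:/container:ro') returns
# VolumeSpec(external='/host', internal='/container', mode='ro'), and a bare path
# such as '/data' returns VolumeSpec(None, '/data', 'rw').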


def parse_repository_tag(s):
    if ":" not in s:
        return s, ""
    repo, tag = s.rsplit(":", 1)
    if "/" in tag:
        return s, ""
    return repo, tag
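
# For reference: parse_repository_tag('ubuntu:14.04') returns ('ubuntu', '14.04'),
# while parse_repository_tag('localhost:5000/ubuntu') returns
# ('localhost:5000/ubuntu', '') because the text after the last ':' contains '/'.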


def build_volume_binding(volume_spec):
    internal = {'bind': volume_spec.internal, 'ro': volume_spec.mode == 'ro'}
    return volume_spec.external, internal


def build_port_bindings(ports):
    port_bindings = {}
    for port in ports:
        internal_port, external = split_port(port)
        if internal_port in port_bindings:
            port_bindings[internal_port].append(external)
        else:
            port_bindings[internal_port] = [external]
    return port_bindings
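
# For reference: build_port_bindings(['8000', '8001:8000']) returns
# {'8000': [None, '8001']}, grouping external bindings by internal port.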


def split_port(port):
    parts = str(port).split(':')
    if not 1 <= len(parts) <= 3:
        raise ConfigError('Invalid port "%s", should be '
                          '[[remote_ip:]remote_port:]port[/protocol]' % port)

    if len(parts) == 1:
        internal_port, = parts
        return internal_port, None
    if len(parts) == 2:
        external_port, internal_port = parts
        return internal_port, external_port

    external_ip, external_port, internal_port = parts
    return internal_port, (external_ip, external_port or None)
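
# For reference: split_port('8000') returns ('8000', None),
# split_port('8001:8000') returns ('8000', '8001'), and
# split_port('127.0.0.1:8001:8000') returns ('8000', ('127.0.0.1', '8001')).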


def build_extra_hosts(extra_hosts_config):
    if not extra_hosts_config:
        return {}

    if isinstance(extra_hosts_config, list):
        extra_hosts_dict = {}
        for extra_hosts_line in extra_hosts_config:
            if not isinstance(extra_hosts_line, six.string_types):
                raise ConfigError(
                    "extra_hosts_config \"%s\" must be either a list of strings "
                    "or a string->string mapping." % extra_hosts_config
                )
            host, ip = extra_hosts_line.split(':')
            extra_hosts_dict.update({host.strip(): ip.strip()})
        extra_hosts_config = extra_hosts_dict

    if isinstance(extra_hosts_config, dict):
        return extra_hosts_config

    raise ConfigError(
        "extra_hosts_config \"%s\" must be either a list of strings "
        "or a string->string mapping." % extra_hosts_config
    )
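
# For reference (illustrative host/IP): build_extra_hosts(['somehost:162.242.195.82'])
# returns {'somehost': '162.242.195.82'}; a dict passed in is returned unchanged.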