# service.py

from __future__ import unicode_literals
from __future__ import absolute_import

from collections import namedtuple
import logging
import re
import sys
from operator import attrgetter

import six
from docker.errors import APIError
from docker.utils import create_host_config, LogConfig

from . import __version__
from .config import DOCKER_CONFIG_KEYS, merge_environment
from .const import (
    LABEL_CONTAINER_NUMBER,
    LABEL_ONE_OFF,
    LABEL_PROJECT,
    LABEL_SERVICE,
    LABEL_VERSION,
    LABEL_CONFIG_HASH,
)
from .container import Container, get_container_name
from .progress_stream import stream_output, StreamOutputError
from .utils import json_hash


log = logging.getLogger(__name__)


DOCKER_START_KEYS = [
    'cap_add',
    'cap_drop',
    'devices',
    'dns',
    'dns_search',
    'env_file',
    'extra_hosts',
    'read_only',
    'net',
    'log_driver',
    'pid',
    'privileged',
    'restart',
    'volumes_from',
    'security_opt',
]

VALID_NAME_CHARS = '[a-zA-Z0-9]'


class BuildError(Exception):
    def __init__(self, service, reason):
        self.service = service
        self.reason = reason


class CannotBeScaledError(Exception):
    pass


class ConfigError(ValueError):
    pass


class NeedsBuildError(Exception):
    def __init__(self, service):
        self.service = service


VolumeSpec = namedtuple('VolumeSpec', 'external internal mode')

ServiceName = namedtuple('ServiceName', 'project service number')

ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
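
# Illustrative shapes for the tuples above (hypothetical values):
#   VolumeSpec('/host/path', '/container/path', 'rw')  - a host bind mount
#   VolumeSpec(None, '/container/path', 'rw')          - a plain data volume
#   ConvergencePlan('recreate', [container, ...])      - an action plus the containers it applies to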


class Service(object):
    def __init__(self, name, client=None, project='default', links=None,
                 external_links=None, volumes_from=None, net=None, **options):
        if not re.match('^%s+$' % VALID_NAME_CHARS, name):
            raise ConfigError('Invalid service name "%s" - only %s are allowed' % (name, VALID_NAME_CHARS))
        if not re.match('^%s+$' % VALID_NAME_CHARS, project):
            raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))
        if 'image' in options and 'build' in options:
            raise ConfigError('Service %s has both an image and build path specified. A service can either be built to image or use an existing image, not both.' % name)
        if 'image' not in options and 'build' not in options:
            raise ConfigError('Service %s has neither an image nor a build path specified. Exactly one must be provided.' % name)

        self.name = name
        self.client = client
        self.project = project
        self.links = links or []
        self.external_links = external_links or []
        self.volumes_from = volumes_from or []
        self.net = net or None
        self.options = options

    def containers(self, stopped=False, one_off=False):
        containers = [
            Container.from_ps(self.client, container)
            for container in self.client.containers(
                all=stopped,
                filters={'label': self.labels(one_off=one_off)})]

        if not containers:
            check_for_legacy_containers(
                self.client,
                self.project,
                [self.name],
                stopped=stopped,
                one_off=one_off)

        return containers

    def get_container(self, number=1):
        """Return a :class:`compose.container.Container` for this service. The
        container must be active, and match `number`.
        """
        labels = self.labels() + ['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]
        for container in self.client.containers(filters={'label': labels}):
            return Container.from_ps(self.client, container)

        raise ValueError("No container found for %s_%s" % (self.name, number))

    def start(self, **options):
        for c in self.containers(stopped=True):
            self.start_container_if_stopped(c, **options)

    def stop(self, **options):
        for c in self.containers():
            log.info("Stopping %s..." % c.name)
            c.stop(**options)

    def kill(self, **options):
        for c in self.containers():
            log.info("Killing %s..." % c.name)
            c.kill(**options)

    def restart(self, **options):
        for c in self.containers():
            log.info("Restarting %s..." % c.name)
            c.restart(**options)

    def scale(self, desired_num):
        """
        Adjusts the number of containers to the specified number and ensures
        they are running.

        - creates containers until there are at least `desired_num`
        - stops containers until there are at most `desired_num` running
        - starts containers until there are at least `desired_num` running
        - removes all stopped containers
        """
        if not self.can_be_scaled():
            raise CannotBeScaledError()

        # Create enough containers
        containers = self.containers(stopped=True)
        while len(containers) < desired_num:
            containers.append(self.create_container())

        running_containers = []
        stopped_containers = []
        for c in containers:
            if c.is_running:
                running_containers.append(c)
            else:
                stopped_containers.append(c)
        running_containers.sort(key=lambda c: c.number)
        stopped_containers.sort(key=lambda c: c.number)

        # Stop containers
        while len(running_containers) > desired_num:
            c = running_containers.pop()
            log.info("Stopping %s..." % c.name)
            c.stop(timeout=1)
            stopped_containers.append(c)

        # Start containers
        while len(running_containers) < desired_num:
            c = stopped_containers.pop(0)
            log.info("Starting %s..." % c.name)
            self.start_container(c)
            running_containers.append(c)

        self.remove_stopped()
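
    # Illustrative usage (a sketch; `docker_client` and the project name are hypothetical):
    #   web = Service('web', client=docker_client, project='myapp', image='nginx')
    #   web.scale(3)  # converge on exactly three running containers for this service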

    def remove_stopped(self, **options):
        for c in self.containers(stopped=True):
            if not c.is_running:
                log.info("Removing %s..." % c.name)
                c.remove(**options)

    def create_container(
            self,
            one_off=False,
            insecure_registry=False,
            do_build=True,
            previous_container=None,
            number=None,
            **override_options):
        """
        Create a container for this service. If the image doesn't exist, attempt to pull
        it.
        """
        self.ensure_image_exists(
            do_build=do_build,
            insecure_registry=insecure_registry,
        )

        container_options = self._get_container_create_options(
            override_options,
            number or self._next_container_number(one_off=one_off),
            one_off=one_off,
            previous_container=previous_container,
        )

        if 'name' in container_options:
            log.info("Creating %s..." % container_options['name'])

        return Container.create(self.client, **container_options)

    def ensure_image_exists(
            self,
            do_build=True,
            insecure_registry=False):
        if self.image():
            return

        if self.can_be_built():
            if do_build:
                self.build()
            else:
                raise NeedsBuildError(self)
        else:
            self.pull(insecure_registry=insecure_registry)

    def image(self):
        try:
            return self.client.inspect_image(self.image_name)
        except APIError as e:
            if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
                return None
            else:
                raise

    @property
    def image_name(self):
        if self.can_be_built():
            return self.full_name
        else:
            return self.options['image']

    def converge(
            self,
            allow_recreate=True,
            smart_recreate=False,
            insecure_registry=False,
            do_build=True):
        """
        If a container for this service doesn't exist, create and start one. If there are
        any, stop them, create+start new ones, and remove the old containers.
        """
        plan = self.convergence_plan(
            allow_recreate=allow_recreate,
            smart_recreate=smart_recreate,
        )

        return self.execute_convergence_plan(
            plan,
            insecure_registry=insecure_registry,
            do_build=do_build,
        )

    def convergence_plan(
            self,
            allow_recreate=True,
            smart_recreate=False):
        containers = self.containers(stopped=True)

        if not containers:
            return ConvergencePlan('create', [])

        if smart_recreate and not self._containers_have_diverged(containers):
            stopped = [c for c in containers if not c.is_running]

            if stopped:
                return ConvergencePlan('start', stopped)

            return ConvergencePlan('noop', containers)

        if not allow_recreate:
            return ConvergencePlan('start', containers)

        return ConvergencePlan('recreate', containers)
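
    # Plan summary (illustrative): no containers -> 'create'; with smart_recreate and an
    # unchanged config -> 'start' for stopped containers, else 'noop'; otherwise 'recreate',
    # or 'start' when allow_recreate is False.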

    def recreate_plan(self):
        containers = self.containers(stopped=True)
        return ConvergencePlan('recreate', containers)

    def _containers_have_diverged(self, containers):
        config_hash = self.config_hash()
        has_diverged = False

        for c in containers:
            container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)
            if container_config_hash != config_hash:
                log.debug(
                    '%s has diverged: %s != %s',
                    c.name, container_config_hash, config_hash,
                )
                has_diverged = True

        return has_diverged

    def execute_convergence_plan(
            self,
            plan,
            insecure_registry=False,
            do_build=True):
        (action, containers) = plan

        if action == 'create':
            container = self.create_container(
                insecure_registry=insecure_registry,
                do_build=do_build,
            )
            self.start_container(container)

            return [container]

        elif action == 'recreate':
            return [
                self.recreate_container(
                    c,
                    insecure_registry=insecure_registry,
                )
                for c in containers
            ]

        elif action == 'start':
            for c in containers:
                self.start_container_if_stopped(c)

            return containers

        elif action == 'noop':
            for c in containers:
                log.info("%s is up-to-date" % c.name)

            return containers

        else:
            raise Exception("Invalid action: {}".format(action))

    def recreate_container(
            self,
            container,
            insecure_registry=False):
        """Recreate a container.

        The original container is renamed to a temporary name so that data
        volumes can be copied to the new container, before the original
        container is removed.
        """
        log.info("Recreating %s..." % container.name)
        try:
            container.stop()
        except APIError as e:
            if (e.response.status_code == 500
                    and e.explanation
                    and 'no such process' in str(e.explanation)):
                pass
            else:
                raise

        # Use a hopefully unique container name by prepending the short id
        self.client.rename(
            container.id,
            '%s_%s' % (container.short_id, container.name))

        new_container = self.create_container(
            insecure_registry=insecure_registry,
            do_build=False,
            previous_container=container,
            number=container.labels.get(LABEL_CONTAINER_NUMBER),
        )
        self.start_container(new_container)
        container.remove()
        return new_container

    def start_container_if_stopped(self, container, **options):
        if container.is_running:
            return container
        else:
            log.info("Starting %s..." % container.name)
            return self.start_container(container, **options)

    def start_container(self, container, **options):
        container.start(**options)
        return container

    def start_or_create_containers(
            self,
            insecure_registry=False,
            do_build=True):
        containers = self.containers(stopped=True)

        if not containers:
            new_container = self.create_container(
                insecure_registry=insecure_registry,
                do_build=do_build,
            )
            return [self.start_container(new_container)]
        else:
            return [self.start_container_if_stopped(c) for c in containers]

    def config_hash(self):
        return json_hash(self.config_dict())

    def config_dict(self):
        return {
            'options': self.options,
            'image_id': self.image()['Id'],
        }

    def get_dependency_names(self):
        net_name = self.get_net_name()
        return (self.get_linked_names() +
                self.get_volumes_from_names() +
                ([net_name] if net_name else []))

    def get_linked_names(self):
        return [s.name for (s, _) in self.links]

    def get_volumes_from_names(self):
        return [s.name for s in self.volumes_from if isinstance(s, Service)]

    def get_net_name(self):
        if isinstance(self.net, Service):
            return self.net.name
        else:
            return

    def get_container_name(self, number, one_off=False):
        # TODO: Implement issue #652 here
        return build_container_name(self.project, self.name, number, one_off)

    # TODO: this would benefit from github.com/docker/docker/pull/11943
    # to remove the need to inspect every container
    def _next_container_number(self, one_off=False):
        numbers = [
            Container.from_ps(self.client, container).number
            for container in self.client.containers(
                all=True,
                filters={'label': self.labels(one_off=one_off)})
        ]
        return 1 if not numbers else max(numbers) + 1

    def _get_links(self, link_to_self):
        links = []
        for service, link_name in self.links:
            for container in service.containers():
                links.append((container.name, link_name or service.name))
                links.append((container.name, container.name))
                links.append((container.name, container.name_without_project))

        if link_to_self:
            for container in self.containers():
                links.append((container.name, self.name))
                links.append((container.name, container.name))
                links.append((container.name, container.name_without_project))

        for external_link in self.external_links:
            if ':' not in external_link:
                link_name = external_link
            else:
                external_link, link_name = external_link.split(':')
            links.append((external_link, link_name))

        return links

    def _get_volumes_from(self):
        volumes_from = []
        for volume_source in self.volumes_from:
            if isinstance(volume_source, Service):
                containers = volume_source.containers(stopped=True)
                if not containers:
                    volumes_from.append(volume_source.create_container().id)
                else:
                    volumes_from.extend(map(attrgetter('id'), containers))

            elif isinstance(volume_source, Container):
                volumes_from.append(volume_source.id)

        return volumes_from

    def _get_net(self):
        if not self.net:
            return "bridge"

        if isinstance(self.net, Service):
            containers = self.net.containers()
            if len(containers) > 0:
                net = 'container:' + containers[0].id
            else:
                log.warning("Warning: Service %s is trying to reuse the network stack "
                            "of another service that is not running." % (self.net.name))
                net = None

        elif isinstance(self.net, Container):
            net = 'container:' + self.net.id

        else:
            net = self.net

        return net

    def _get_container_create_options(
            self,
            override_options,
            number,
            one_off=False,
            previous_container=None):
        add_config_hash = (not one_off and not override_options)

        container_options = dict(
            (k, self.options[k])
            for k in DOCKER_CONFIG_KEYS if k in self.options)
        container_options.update(override_options)

        container_options['name'] = self.get_container_name(number, one_off)

        if add_config_hash:
            config_hash = self.config_hash()
            if 'labels' not in container_options:
                container_options['labels'] = {}
            container_options['labels'][LABEL_CONFIG_HASH] = config_hash
            log.debug("Added config hash: %s" % config_hash)

        if 'detach' not in container_options:
            container_options['detach'] = True

        # If a qualified hostname was given, split it into an
        # unqualified hostname and a domainname unless domainname
        # was also given explicitly. This matches the behavior of
        # the official Docker CLI in that scenario.
        if ('hostname' in container_options
                and 'domainname' not in container_options
                and '.' in container_options['hostname']):
            parts = container_options['hostname'].partition('.')
            container_options['hostname'] = parts[0]
            container_options['domainname'] = parts[2]

        if 'ports' in container_options or 'expose' in self.options:
            ports = []
            all_ports = container_options.get('ports', []) + self.options.get('expose', [])
            for port in all_ports:
                port = str(port)
                if ':' in port:
                    port = port.split(':')[-1]
                if '/' in port:
                    port = tuple(port.split('/'))
                ports.append(port)
            container_options['ports'] = ports

        override_options['binds'] = merge_volume_bindings(
            container_options.get('volumes') or [],
            previous_container)

        if 'volumes' in container_options:
            container_options['volumes'] = dict(
                (parse_volume_spec(v).internal, {})
                for v in container_options['volumes'])

        container_options['environment'] = merge_environment(
            self.options.get('environment'),
            override_options.get('environment'))

        if previous_container:
            container_options['environment']['affinity:container'] = ('=' + previous_container.id)

        container_options['image'] = self.image_name

        container_options['labels'] = build_container_labels(
            container_options.get('labels', {}),
            self.labels(one_off=one_off),
            number)

        # Delete options which are only used when starting
        for key in DOCKER_START_KEYS:
            container_options.pop(key, None)

        container_options['host_config'] = self._get_container_host_config(
            override_options,
            one_off=one_off)

        return container_options

    def _get_container_host_config(self, override_options, one_off=False):
        options = dict(self.options, **override_options)
        port_bindings = build_port_bindings(options.get('ports') or [])

        privileged = options.get('privileged', False)
        cap_add = options.get('cap_add', None)
        cap_drop = options.get('cap_drop', None)
        log_config = LogConfig(type=options.get('log_driver', 'json-file'))
        pid = options.get('pid', None)
        security_opt = options.get('security_opt', None)

        dns = options.get('dns', None)
        if isinstance(dns, six.string_types):
            dns = [dns]

        dns_search = options.get('dns_search', None)
        if isinstance(dns_search, six.string_types):
            dns_search = [dns_search]

        restart = parse_restart_spec(options.get('restart', None))

        extra_hosts = build_extra_hosts(options.get('extra_hosts', None))
        read_only = options.get('read_only', None)

        devices = options.get('devices', None)

        return create_host_config(
            links=self._get_links(link_to_self=one_off),
            port_bindings=port_bindings,
            binds=options.get('binds'),
            volumes_from=self._get_volumes_from(),
            privileged=privileged,
            network_mode=self._get_net(),
            devices=devices,
            dns=dns,
            dns_search=dns_search,
            restart_policy=restart,
            cap_add=cap_add,
            cap_drop=cap_drop,
            log_config=log_config,
            extra_hosts=extra_hosts,
            read_only=read_only,
            pid_mode=pid,
            security_opt=security_opt
        )

    def build(self, no_cache=False):
        log.info('Building %s...' % self.name)

        path = six.binary_type(self.options['build'])

        build_output = self.client.build(
            path=path,
            tag=self.image_name,
            stream=True,
            rm=True,
            nocache=no_cache,
            dockerfile=self.options.get('dockerfile', None),
        )

        try:
            all_events = stream_output(build_output, sys.stdout)
        except StreamOutputError as e:
            raise BuildError(self, six.text_type(e))

        # Ensure the HTTP connection is not reused for another
        # streaming command, as the Docker daemon can sometimes
        # complain about it
        self.client.close()

        image_id = None

        for event in all_events:
            if 'stream' in event:
                match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
                if match:
                    image_id = match.group(1)

        if image_id is None:
            raise BuildError(self, event if all_events else 'Unknown')

        return image_id

    def can_be_built(self):
        return 'build' in self.options

    @property
    def full_name(self):
        """
        The tag to give to images built for this service.
        """
        return '%s_%s' % (self.project, self.name)

    def labels(self, one_off=False):
        return [
            '{0}={1}'.format(LABEL_PROJECT, self.project),
            '{0}={1}'.format(LABEL_SERVICE, self.name),
            '{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False")
        ]
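
    # Example result (illustrative; assumes the label constants resolve to the usual
    # com.docker.compose.* names) for project "myapp", service "web":
    #   ['com.docker.compose.project=myapp',
    #    'com.docker.compose.service=web',
    #    'com.docker.compose.oneoff=False']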

    def can_be_scaled(self):
        for port in self.options.get('ports', []):
            if ':' in str(port):
                return False
        return True

    def pull(self, insecure_registry=False):
        if 'image' not in self.options:
            return

        repo, tag = parse_repository_tag(self.options['image'])
        tag = tag or 'latest'
        log.info('Pulling %s (%s:%s)...' % (self.name, repo, tag))
        output = self.client.pull(
            repo,
            tag=tag,
            stream=True,
            insecure_registry=insecure_registry)
        stream_output(output, sys.stdout)


def get_container_data_volumes(container, volumes_option):
    """Find the container data volumes that are in `volumes_option`, and return
    a mapping of volume bindings for those volumes.
    """
    volumes = []

    volumes_option = volumes_option or []
    container_volumes = container.get('Volumes') or {}
    image_volumes = container.image_config['ContainerConfig'].get('Volumes') or {}

    for volume in set(volumes_option + list(image_volumes.keys())):
        volume = parse_volume_spec(volume)

        # No need to preserve host volumes
        if volume.external:
            continue

        volume_path = container_volumes.get(volume.internal)

        # New volume, doesn't exist in the old container
        if not volume_path:
            continue

        # Copy existing volume from old container
        volume = volume._replace(external=volume_path)
        volumes.append(build_volume_binding(volume))

    return dict(volumes)


def merge_volume_bindings(volumes_option, previous_container):
    """Return a dictionary of volume bindings for a container. Container data
    volumes are replaced by those from the previous container.
    """
    volume_bindings = dict(
        build_volume_binding(parse_volume_spec(volume))
        for volume in volumes_option or []
        if ':' in volume)

    if previous_container:
        volume_bindings.update(
            get_container_data_volumes(previous_container, volumes_option))

    return volume_bindings
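
# Example (illustrative):
#   merge_volume_bindings(['/host/logs:/var/log'], previous_container=None)
#   -> {'/host/logs': {'bind': '/var/log', 'ro': False}}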


def build_container_name(project, service, number, one_off=False):
    bits = [project, service]
    if one_off:
        bits.append('run')
    return '_'.join(bits + [str(number)])
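
# Examples (illustrative):
#   build_container_name('myapp', 'web', 1)                -> 'myapp_web_1'
#   build_container_name('myapp', 'web', 1, one_off=True)  -> 'myapp_web_run_1'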


def build_container_labels(label_options, service_labels, number, one_off=False):
    labels = label_options or {}
    labels.update(label.split('=', 1) for label in service_labels)
    labels[LABEL_CONTAINER_NUMBER] = str(number)
    labels[LABEL_VERSION] = __version__
    return labels


def check_for_legacy_containers(
        client,
        project,
        services,
        stopped=False,
        one_off=False):
    """Check if there are containers named using the old naming convention
    and warn the user that those containers may need to be migrated to
    using labels, so that compose can find them.
    """
    for container in client.containers(all=stopped):
        name = get_container_name(container)
        for service in services:
            prefix = '%s_%s_%s' % (project, service, 'run_' if one_off else '')
            if not name.startswith(prefix):
                continue

            log.warn(
                "Compose found a container named %s without any labels. As "
                "of compose 1.3.0 containers are identified with labels "
                "instead of naming convention. If you'd like compose to use "
                "this container, please run "
                "`docker-compose migrate-to-labels`" % (name,))


def parse_restart_spec(restart_config):
    if not restart_config:
        return None

    parts = restart_config.split(':')
    if len(parts) > 2:
        raise ConfigError("Restart %s has incorrect format, should be "
                          "mode[:max_retry]" % restart_config)

    if len(parts) == 2:
        name, max_retry_count = parts
    else:
        name, = parts
        max_retry_count = 0

    return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
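
# Examples (illustrative):
#   parse_restart_spec('always')        -> {'Name': 'always', 'MaximumRetryCount': 0}
#   parse_restart_spec('on-failure:5')  -> {'Name': 'on-failure', 'MaximumRetryCount': 5}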


def parse_volume_spec(volume_config):
    parts = volume_config.split(':')
    if len(parts) > 3:
        raise ConfigError("Volume %s has incorrect format, should be "
                          "external:internal[:mode]" % volume_config)

    if len(parts) == 1:
        return VolumeSpec(None, parts[0], 'rw')

    if len(parts) == 2:
        parts.append('rw')

    external, internal, mode = parts
    if mode not in ('rw', 'ro'):
        raise ConfigError("Volume %s has invalid mode (%s), should be "
                          "one of: rw, ro." % (volume_config, mode))

    return VolumeSpec(external, internal, mode)
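
# Examples (illustrative):
#   parse_volume_spec('/var/lib/data')        -> VolumeSpec(None, '/var/lib/data', 'rw')
#   parse_volume_spec('/host:/container:ro')  -> VolumeSpec('/host', '/container', 'ro')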


def parse_repository_tag(s):
    if ":" not in s:
        return s, ""
    repo, tag = s.rsplit(":", 1)
    if "/" in tag:
        return s, ""
    return repo, tag
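
# Examples (illustrative):
#   parse_repository_tag('ubuntu')                -> ('ubuntu', '')
#   parse_repository_tag('ubuntu:14.04')          -> ('ubuntu', '14.04')
#   parse_repository_tag('localhost:5000/ubuntu') -> ('localhost:5000/ubuntu', '')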


def build_volume_binding(volume_spec):
    internal = {'bind': volume_spec.internal, 'ro': volume_spec.mode == 'ro'}
    return volume_spec.external, internal
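
# Example (illustrative):
#   build_volume_binding(VolumeSpec('/host', '/container', 'ro'))
#   -> ('/host', {'bind': '/container', 'ro': True})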


def build_port_bindings(ports):
    port_bindings = {}
    for port in ports:
        internal_port, external = split_port(port)
        if internal_port in port_bindings:
            port_bindings[internal_port].append(external)
        else:
            port_bindings[internal_port] = [external]
    return port_bindings
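
# Example (illustrative):
#   build_port_bindings(['8000:80', '127.0.0.1:2222:22'])
#   -> {'80': ['8000'], '22': [('127.0.0.1', '2222')]}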


def split_port(port):
    parts = str(port).split(':')
    if not 1 <= len(parts) <= 3:
        raise ConfigError('Invalid port "%s", should be '
                          '[[remote_ip:]remote_port:]port[/protocol]' % port)

    if len(parts) == 1:
        internal_port, = parts
        return internal_port, None
    if len(parts) == 2:
        external_port, internal_port = parts
        return internal_port, external_port

    external_ip, external_port, internal_port = parts
    return internal_port, (external_ip, external_port or None)
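
# Examples (illustrative):
#   split_port('8000')               -> ('8000', None)
#   split_port('8000:80')            -> ('80', '8000')
#   split_port('127.0.0.1:8000:80')  -> ('80', ('127.0.0.1', '8000'))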


def build_extra_hosts(extra_hosts_config):
    if not extra_hosts_config:
        return {}

    if isinstance(extra_hosts_config, list):
        extra_hosts_dict = {}
        for extra_hosts_line in extra_hosts_config:
            if not isinstance(extra_hosts_line, six.string_types):
                raise ConfigError(
                    "extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping." %
                    extra_hosts_config
                )
            host, ip = extra_hosts_line.split(':')
            extra_hosts_dict.update({host.strip(): ip.strip()})
        extra_hosts_config = extra_hosts_dict

    if isinstance(extra_hosts_config, dict):
        return extra_hosts_config

    raise ConfigError(
        "extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping." %
        extra_hosts_config
    )
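
# Example (illustrative):
#   build_extra_hosts(['somehost:162.242.195.82'])
#   -> {'somehost': '162.242.195.82'}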