# service.py

from __future__ import unicode_literals
from __future__ import absolute_import
from collections import namedtuple
import logging
import re
from operator import attrgetter
import sys

import six
from docker.errors import APIError
from docker.utils import create_host_config

from .config import DOCKER_CONFIG_KEYS
from .container import Container, get_container_name
from .progress_stream import stream_output, StreamOutputError

log = logging.getLogger(__name__)

DOCKER_START_KEYS = [
    'cap_add',
    'cap_drop',
    'dns',
    'dns_search',
    'env_file',
    'extra_hosts',
    'net',
    'pid',
    'privileged',
    'restart',
]

VALID_NAME_CHARS = '[a-zA-Z0-9]'


class BuildError(Exception):
    def __init__(self, service, reason):
        self.service = service
        self.reason = reason


class CannotBeScaledError(Exception):
    pass


class ConfigError(ValueError):
    pass


VolumeSpec = namedtuple('VolumeSpec', 'external internal mode')

ServiceName = namedtuple('ServiceName', 'project service number')


class Service(object):
    def __init__(self, name, client=None, project='default', links=None, external_links=None, volumes_from=None, net=None, **options):
        if not re.match('^%s+$' % VALID_NAME_CHARS, name):
            raise ConfigError('Invalid service name "%s" - only %s are allowed' % (name, VALID_NAME_CHARS))
        if not re.match('^%s+$' % VALID_NAME_CHARS, project):
            raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))
        if 'image' in options and 'build' in options:
            raise ConfigError('Service %s has both an image and build path specified. A service can either be built to image or use an existing image, not both.' % name)
        if 'image' not in options and 'build' not in options:
            raise ConfigError('Service %s has neither an image nor a build path specified. Exactly one must be provided.' % name)

        self.name = name
        self.client = client
        self.project = project
        self.links = links or []
        self.external_links = external_links or []
        self.volumes_from = volumes_from or []
        self.net = net or None
        self.options = options

    def containers(self, stopped=False, one_off=False):
        return [Container.from_ps(self.client, container)
                for container in self.client.containers(all=stopped)
                if self.has_container(container, one_off=one_off)]

    def has_container(self, container, one_off=False):
        """Return True if `container` was created to fulfill this service."""
        name = get_container_name(container)
        if not name or not is_valid_name(name, one_off):
            return False
        project, name, _number = parse_name(name)
        return project == self.project and name == self.name

    def get_container(self, number=1):
        """Return a :class:`compose.container.Container` for this service. The
        container must be active, and match `number`.
        """
        for container in self.client.containers():
            if not self.has_container(container):
                continue
            _, _, container_number = parse_name(get_container_name(container))
            if container_number == number:
                return Container.from_ps(self.client, container)

        raise ValueError("No container found for %s_%s" % (self.name, number))

    def start(self, **options):
        for c in self.containers(stopped=True):
            self.start_container_if_stopped(c, **options)

    def stop(self, **options):
        for c in self.containers():
            log.info("Stopping %s..." % c.name)
            c.stop(**options)

    def kill(self, **options):
        for c in self.containers():
            log.info("Killing %s..." % c.name)
            c.kill(**options)

    def restart(self, **options):
        for c in self.containers():
            log.info("Restarting %s..." % c.name)
            c.restart(**options)

    def scale(self, desired_num):
        """
        Adjusts the number of containers to the specified number and ensures
        they are running.

        - creates containers until there are at least `desired_num`
        - stops containers until there are at most `desired_num` running
        - starts containers until there are at least `desired_num` running
        - removes all stopped containers
        """
        if not self.can_be_scaled():
            raise CannotBeScaledError()

        # Create enough containers
        containers = self.containers(stopped=True)
        while len(containers) < desired_num:
            log.info("Creating %s..." % self._next_container_name(containers))
            containers.append(self.create_container(detach=True))

        running_containers = []
        stopped_containers = []
        for c in containers:
            if c.is_running:
                running_containers.append(c)
            else:
                stopped_containers.append(c)
        running_containers.sort(key=lambda c: c.number)
        stopped_containers.sort(key=lambda c: c.number)

        # Stop containers
        while len(running_containers) > desired_num:
            c = running_containers.pop()
            log.info("Stopping %s..." % c.name)
            c.stop(timeout=1)
            stopped_containers.append(c)

        # Start containers
        while len(running_containers) < desired_num:
            c = stopped_containers.pop(0)
            log.info("Starting %s..." % c.name)
            self.start_container(c)
            running_containers.append(c)

        self.remove_stopped()

    def remove_stopped(self, **options):
        for c in self.containers(stopped=True):
            if not c.is_running:
                log.info("Removing %s..." % c.name)
                c.remove(**options)

    def create_container(self,
                         one_off=False,
                         insecure_registry=False,
                         do_build=True,
                         intermediate_container=None,
                         **override_options):
        """
        Create a container for this service. If the image doesn't exist, attempt to pull
        it.
        """
        container_options = self._get_container_create_options(
            override_options,
            one_off=one_off,
            intermediate_container=intermediate_container,
        )

        if (do_build and
                self.can_be_built() and
                not self.client.images(name=self.full_name)):
            self.build()

        try:
            return Container.create(self.client, **container_options)
        except APIError as e:
            if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
                self.pull(insecure_registry=insecure_registry)
                return Container.create(self.client, **container_options)
            raise

    def recreate_containers(self, insecure_registry=False, do_build=True, **override_options):
        """
        If a container for this service doesn't exist, create and start one. If there are
        any, stop them, create+start new ones, and remove the old containers.
        """
        containers = self.containers(stopped=True)

        if not containers:
            log.info("Creating %s..." % self._next_container_name(containers))
            container = self.create_container(
                insecure_registry=insecure_registry,
                do_build=do_build,
                **override_options)
            self.start_container(container)
            return [(None, container)]
        else:
            tuples = []

            for c in containers:
                log.info("Recreating %s..." % c.name)
                tuples.append(self.recreate_container(c, insecure_registry=insecure_registry, **override_options))

            return tuples

    def recreate_container(self, container, **override_options):
        """Recreate a container. An intermediate container is created so that
        the new container has the same name, while still supporting
        `volumes-from` the original container.
        """
        try:
            container.stop()
        except APIError as e:
            if (e.response.status_code == 500
                    and e.explanation
                    and 'no such process' in str(e.explanation)):
                pass
            else:
                raise

        intermediate_container = Container.create(
            self.client,
            image=container.image,
            entrypoint=['/bin/echo'],
            command=[],
            detach=True,
            host_config=create_host_config(volumes_from=[container.id]),
        )
        intermediate_container.start()
        intermediate_container.wait()
        container.remove()

        options = dict(override_options)
        new_container = self.create_container(
            do_build=False,
            intermediate_container=intermediate_container,
            **options
        )
        self.start_container(new_container)

        intermediate_container.remove()

        return (intermediate_container, new_container)

    def start_container_if_stopped(self, container, **options):
        if container.is_running:
            return container
        else:
            log.info("Starting %s..." % container.name)
            return self.start_container(container, **options)

    def start_container(self, container, **options):
        container.start(**options)
        return container

    def start_or_create_containers(
            self,
            insecure_registry=False,
            detach=False,
            do_build=True):
        containers = self.containers(stopped=True)

        if not containers:
            log.info("Creating %s..." % self._next_container_name(containers))
            new_container = self.create_container(
                insecure_registry=insecure_registry,
                detach=detach,
                do_build=do_build,
            )
            return [self.start_container(new_container)]
        else:
            return [self.start_container_if_stopped(c) for c in containers]

    def get_linked_names(self):
        return [s.name for (s, _) in self.links]

    def get_volumes_from_names(self):
        return [s.name for s in self.volumes_from if isinstance(s, Service)]

    def get_net_name(self):
        if isinstance(self.net, Service):
            return self.net.name
        else:
            return

    def _next_container_name(self, all_containers, one_off=False):
        bits = [self.project, self.name]
        if one_off:
            bits.append('run')
        return '_'.join(bits + [str(self._next_container_number(all_containers))])

    def _next_container_number(self, all_containers):
        numbers = [parse_name(c.name).number for c in all_containers]
        return 1 if not numbers else max(numbers) + 1

    def _get_links(self, link_to_self):
        links = []
        for service, link_name in self.links:
            for container in service.containers():
                links.append((container.name, link_name or service.name))
                links.append((container.name, container.name))
                links.append((container.name, container.name_without_project))
        if link_to_self:
            for container in self.containers():
                links.append((container.name, self.name))
                links.append((container.name, container.name))
                links.append((container.name, container.name_without_project))
        for external_link in self.external_links:
            if ':' not in external_link:
                link_name = external_link
            else:
                external_link, link_name = external_link.split(':')
            links.append((external_link, link_name))
        return links

    def _get_volumes_from(self, intermediate_container=None):
        volumes_from = []
        for volume_source in self.volumes_from:
            if isinstance(volume_source, Service):
                containers = volume_source.containers(stopped=True)
                if not containers:
                    volumes_from.append(volume_source.create_container().id)
                else:
                    volumes_from.extend(map(attrgetter('id'), containers))
            elif isinstance(volume_source, Container):
                volumes_from.append(volume_source.id)

        if intermediate_container:
            volumes_from.append(intermediate_container.id)

        return volumes_from

    def _get_net(self):
        if not self.net:
            return "bridge"

        if isinstance(self.net, Service):
            containers = self.net.containers()
            if len(containers) > 0:
                net = 'container:' + containers[0].id
            else:
                log.warning("Warning: Service %s is trying to reuse the network stack "
                            "of another service that is not running." % (self.net.name))
                net = None
        elif isinstance(self.net, Container):
            net = 'container:' + self.net.id
        else:
            net = self.net

        return net

    def _get_container_create_options(self, override_options, one_off=False, intermediate_container=None):
        container_options = dict(
            (k, self.options[k])
            for k in DOCKER_CONFIG_KEYS if k in self.options)
        container_options.update(override_options)

        container_options['name'] = self._next_container_name(
            self.containers(stopped=True, one_off=one_off),
            one_off)

        # If a qualified hostname was given, split it into an
        # unqualified hostname and a domainname unless domainname
        # was also given explicitly. This matches the behavior of
        # the official Docker CLI in that scenario.
        if ('hostname' in container_options
                and 'domainname' not in container_options
                and '.' in container_options['hostname']):
            parts = container_options['hostname'].partition('.')
            container_options['hostname'] = parts[0]
            container_options['domainname'] = parts[2]

        if 'ports' in container_options or 'expose' in self.options:
            ports = []
            all_ports = container_options.get('ports', []) + self.options.get('expose', [])
            for port in all_ports:
                port = str(port)
                if ':' in port:
                    port = port.split(':')[-1]
                if '/' in port:
                    port = tuple(port.split('/'))
                ports.append(port)
            container_options['ports'] = ports

        if 'volumes' in container_options:
            container_options['volumes'] = dict(
                (parse_volume_spec(v).internal, {})
                for v in container_options['volumes'])

        if self.can_be_built():
            container_options['image'] = self.full_name

        # Delete options which are only used when starting
        for key in DOCKER_START_KEYS:
            container_options.pop(key, None)

        container_options['host_config'] = self._get_container_host_config(override_options, one_off=one_off, intermediate_container=intermediate_container)

        return container_options

    def _get_container_host_config(self, override_options, one_off=False, intermediate_container=None):
        options = dict(self.options, **override_options)

        port_bindings = build_port_bindings(options.get('ports') or [])

        volume_bindings = dict(
            build_volume_binding(parse_volume_spec(volume))
            for volume in options.get('volumes') or []
            if ':' in volume)

        privileged = options.get('privileged', False)
        cap_add = options.get('cap_add', None)
        cap_drop = options.get('cap_drop', None)
        pid = options.get('pid', None)

        dns = options.get('dns', None)
        if isinstance(dns, six.string_types):
            dns = [dns]

        dns_search = options.get('dns_search', None)
        if isinstance(dns_search, six.string_types):
            dns_search = [dns_search]

        restart = parse_restart_spec(options.get('restart', None))

        extra_hosts = build_extra_hosts(options.get('extra_hosts', None))

        return create_host_config(
            links=self._get_links(link_to_self=one_off),
            port_bindings=port_bindings,
            binds=volume_bindings,
            volumes_from=self._get_volumes_from(intermediate_container),
            privileged=privileged,
            network_mode=self._get_net(),
            dns=dns,
            dns_search=dns_search,
            restart_policy=restart,
            cap_add=cap_add,
            cap_drop=cap_drop,
            extra_hosts=extra_hosts,
            pid_mode=pid
        )

    def build(self, no_cache=False):
        log.info('Building %s...' % self.name)

        path = six.binary_type(self.options['build'])

        build_output = self.client.build(
            path=path,
            tag=self.full_name,
            stream=True,
            rm=True,
            nocache=no_cache,
            dockerfile=self.options.get('dockerfile', None),
        )

        try:
            all_events = stream_output(build_output, sys.stdout)
        except StreamOutputError as e:
            raise BuildError(self, six.text_type(e))

        # Ensure the HTTP connection is not reused for another
        # streaming command, as the Docker daemon can sometimes
        # complain about it
        self.client.close()

        image_id = None

        for event in all_events:
            if 'stream' in event:
                match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
                if match:
                    image_id = match.group(1)

        if image_id is None:
            raise BuildError(self, event if all_events else 'Unknown')

        return image_id

    def can_be_built(self):
        return 'build' in self.options

    @property
    def full_name(self):
        """
        The tag to give to images built for this service.
        """
        return '%s_%s' % (self.project, self.name)

    def can_be_scaled(self):
        for port in self.options.get('ports', []):
            if ':' in str(port):
                return False
        return True

    def pull(self, insecure_registry=False):
        if 'image' not in self.options:
            return

        repo, tag = parse_repository_tag(self.options['image'])
        tag = tag or 'latest'
        log.info('Pulling %s (%s:%s)...' % (self.name, repo, tag))

        output = self.client.pull(
            repo,
            tag=tag,
            stream=True,
            insecure_registry=insecure_registry)
        stream_output(output, sys.stdout)
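
# Rough usage sketch (illustrative only, not part of the module): a Service is
# normally constructed by higher-level code from the parsed configuration, but
# driving one directly looks roughly like this, assuming `client` is a
# connected docker-py client:
#
#     web = Service('web', client=client, project='myapp', image='busybox')
#     web.start_or_create_containers()
#     web.scale(3)
#     web.stop(timeout=10)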


NAME_RE = re.compile(r'^([^_]+)_([^_]+)_(run_)?(\d+)$')


def is_valid_name(name, one_off=False):
    match = NAME_RE.match(name)
    if match is None:
        return False
    if one_off:
        return match.group(3) == 'run_'
    else:
        return match.group(3) is None


def parse_name(name):
    match = NAME_RE.match(name)
    (project, service_name, _, suffix) = match.groups()
    return ServiceName(project, service_name, int(suffix))
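
# Container names handled here look like "<project>_<service>_<number>", with an
# extra "run_" segment for one-off containers. For example (illustrative values):
#
#     parse_name('myapp_web_1')      == ServiceName('myapp', 'web', 1)
#     parse_name('myapp_web_run_2')  == ServiceName('myapp', 'web', 2)
#     is_valid_name('myapp_web_1')                 # True
#     is_valid_name('myapp_web_1', one_off=True)   # False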


def parse_restart_spec(restart_config):
    if not restart_config:
        return None
    parts = restart_config.split(':')
    if len(parts) > 2:
        raise ConfigError("Restart %s has incorrect format, should be "
                          "mode[:max_retry]" % restart_config)
    if len(parts) == 2:
        name, max_retry_count = parts
    else:
        name, = parts
        max_retry_count = 0

    return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
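
# For example (illustrative values): parse_restart_spec('on-failure:5') returns
# {'Name': 'on-failure', 'MaximumRetryCount': 5}, while parse_restart_spec('always')
# returns {'Name': 'always', 'MaximumRetryCount': 0}.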


def parse_volume_spec(volume_config):
    parts = volume_config.split(':')
    if len(parts) > 3:
        raise ConfigError("Volume %s has incorrect format, should be "
                          "external:internal[:mode]" % volume_config)

    if len(parts) == 1:
        return VolumeSpec(None, parts[0], 'rw')

    if len(parts) == 2:
        parts.append('rw')

    external, internal, mode = parts
    if mode not in ('rw', 'ro'):
        raise ConfigError("Volume %s has invalid mode (%s), should be "
                          "one of: rw, ro." % (volume_config, mode))

    return VolumeSpec(external, internal, mode)
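
# For example (illustrative paths): parse_volume_spec('/data') returns
# VolumeSpec(None, '/data', 'rw'), and parse_volume_spec('/host/logs:/var/log:ro')
# returns VolumeSpec('/host/logs', '/var/log', 'ro').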


def parse_repository_tag(s):
    if ":" not in s:
        return s, ""
    repo, tag = s.rsplit(":", 1)
    if "/" in tag:
        return s, ""
    return repo, tag
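
# For example: parse_repository_tag('ubuntu:14.04') returns ('ubuntu', '14.04'),
# while parse_repository_tag('localhost:5000/ubuntu') returns
# ('localhost:5000/ubuntu', '') because the text after the last ':' contains a
# '/' and is therefore a registry port, not a tag.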


def build_volume_binding(volume_spec):
    internal = {'bind': volume_spec.internal, 'ro': volume_spec.mode == 'ro'}
    return volume_spec.external, internal
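
# For example: build_volume_binding(VolumeSpec('/host/logs', '/var/log', 'ro'))
# returns ('/host/logs', {'bind': '/var/log', 'ro': True}), the (host path,
# options) pair used for the `binds` argument in _get_container_host_config above.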


def build_port_bindings(ports):
    port_bindings = {}
    for port in ports:
        internal_port, external = split_port(port)
        if internal_port in port_bindings:
            port_bindings[internal_port].append(external)
        else:
            port_bindings[internal_port] = [external]
    return port_bindings
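
# For example (illustrative ports): build_port_bindings(['8000:80', '8001:80', '9000'])
# returns {'80': ['8000', '8001'], '9000': [None]}; the same internal port can be
# published on several external ports.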


def split_port(port):
    parts = str(port).split(':')
    if not 1 <= len(parts) <= 3:
        raise ConfigError('Invalid port "%s", should be '
                          '[[remote_ip:]remote_port:]port[/protocol]' % port)

    if len(parts) == 1:
        internal_port, = parts
        return internal_port, None
    if len(parts) == 2:
        external_port, internal_port = parts
        return internal_port, external_port

    external_ip, external_port, internal_port = parts
    return internal_port, (external_ip, external_port or None)
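
# For example (illustrative values):
#
#     split_port('8000')               == ('8000', None)
#     split_port('8000:80')            == ('80', '8000')
#     split_port('127.0.0.1:8000:80')  == ('80', ('127.0.0.1', '8000'))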


def build_extra_hosts(extra_hosts_config):
    if not extra_hosts_config:
        return {}

    if isinstance(extra_hosts_config, list):
        extra_hosts_dict = {}
        for extra_hosts_line in extra_hosts_config:
            if not isinstance(extra_hosts_line, six.string_types):
                raise ConfigError(
                    "extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping." %
                    extra_hosts_config
                )
            host, ip = extra_hosts_line.split(':')
            extra_hosts_dict.update({host.strip(): ip.strip()})
        extra_hosts_config = extra_hosts_dict

    if isinstance(extra_hosts_config, dict):
        return extra_hosts_config

    raise ConfigError(
        "extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping." %
        extra_hosts_config
    )
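
# For example (illustrative addresses): both the list form
# ['somehost:162.242.195.82', 'otherhost:50.31.209.229'] and the dict form
# {'somehost': '162.242.195.82', 'otherhost': '50.31.209.229'} normalize to the
# same string->string mapping.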