service.py

from __future__ import unicode_literals
from __future__ import absolute_import
from collections import namedtuple
import logging
import re
from operator import attrgetter
import sys

import six
from docker.errors import APIError
from docker.utils import create_host_config

from .config import DOCKER_CONFIG_KEYS
from .container import Container, get_container_name
from .progress_stream import stream_output, StreamOutputError

log = logging.getLogger(__name__)

DOCKER_START_KEYS = [
    'cap_add',
    'cap_drop',
    'dns',
    'dns_search',
    'env_file',
    'net',
    'privileged',
    'restart',
]

VALID_NAME_CHARS = '[a-zA-Z0-9]'


class BuildError(Exception):
    def __init__(self, service, reason):
        self.service = service
        self.reason = reason


class CannotBeScaledError(Exception):
    pass


class ConfigError(ValueError):
    pass


VolumeSpec = namedtuple('VolumeSpec', 'external internal mode')

ServiceName = namedtuple('ServiceName', 'project service number')


class Service(object):
    def __init__(self, name, client=None, project='default', links=None, external_links=None, volumes_from=None, net=None, **options):
        if not re.match('^%s+$' % VALID_NAME_CHARS, name):
            raise ConfigError('Invalid service name "%s" - only %s are allowed' % (name, VALID_NAME_CHARS))
        if not re.match('^%s+$' % VALID_NAME_CHARS, project):
            raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))
        if 'image' in options and 'build' in options:
            raise ConfigError('Service %s has both an image and build path specified. A service can either be built to an image or use an existing image, not both.' % name)

        self.name = name
        self.client = client
        self.project = project
        self.links = links or []
        self.external_links = external_links or []
        self.volumes_from = volumes_from or []
        self.net = net or None
        self.options = options

    def containers(self, stopped=False, one_off=False):
        return [Container.from_ps(self.client, container)
                for container in self.client.containers(all=stopped)
                if self.has_container(container, one_off=one_off)]

    def has_container(self, container, one_off=False):
        """Return True if `container` was created to fulfill this service."""
        name = get_container_name(container)
        if not name or not is_valid_name(name, one_off):
            return False
        project, name, _number = parse_name(name)
        return project == self.project and name == self.name

    def get_container(self, number=1):
        """Return a :class:`compose.container.Container` for this service. The
        container must be active, and match `number`.
        """
        for container in self.client.containers():
            if not self.has_container(container):
                continue
            _, _, container_number = parse_name(get_container_name(container))
            if container_number == number:
                return Container.from_ps(self.client, container)

        raise ValueError("No container found for %s_%s" % (self.name, number))

    def start(self, **options):
        for c in self.containers(stopped=True):
            self.start_container_if_stopped(c, **options)

    def stop(self, **options):
        for c in self.containers():
            log.info("Stopping %s..." % c.name)
            c.stop(**options)

    def kill(self, **options):
        for c in self.containers():
            log.info("Killing %s..." % c.name)
            c.kill(**options)

    def restart(self, **options):
        for c in self.containers():
            log.info("Restarting %s..." % c.name)
            c.restart(**options)

    def scale(self, desired_num):
        """
        Adjusts the number of containers to the specified number and ensures
        they are running.

        - creates containers until there are at least `desired_num`
        - stops containers until there are at most `desired_num` running
        - starts containers until there are at least `desired_num` running
        - removes all stopped containers
        """
        if not self.can_be_scaled():
            raise CannotBeScaledError()

        # Create enough containers
        containers = self.containers(stopped=True)
        while len(containers) < desired_num:
            log.info("Creating %s..." % self._next_container_name(containers))
            containers.append(self.create_container(detach=True))

        running_containers = []
        stopped_containers = []
        for c in containers:
            if c.is_running:
                running_containers.append(c)
            else:
                stopped_containers.append(c)
        running_containers.sort(key=lambda c: c.number)
        stopped_containers.sort(key=lambda c: c.number)

        # Stop containers
        while len(running_containers) > desired_num:
            c = running_containers.pop()
            log.info("Stopping %s..." % c.name)
            c.stop(timeout=1)
            stopped_containers.append(c)

        # Start containers
        while len(running_containers) < desired_num:
            c = stopped_containers.pop(0)
            log.info("Starting %s..." % c.name)
            self.start_container(c)
            running_containers.append(c)

        self.remove_stopped()

    def remove_stopped(self, **options):
        for c in self.containers(stopped=True):
            if not c.is_running:
                log.info("Removing %s..." % c.name)
                c.remove(**options)

    def create_container(self,
                         one_off=False,
                         insecure_registry=False,
                         do_build=True,
                         intermediate_container=None,
                         **override_options):
        """
        Create a container for this service. If the image doesn't exist, attempt to pull
        it.
        """
        container_options = self._get_container_create_options(
            override_options,
            one_off=one_off,
            intermediate_container=intermediate_container,
        )

        if (do_build and
                self.can_be_built() and
                not self.client.images(name=self.full_name)):
            self.build()

        try:
            return Container.create(self.client, **container_options)
        except APIError as e:
            if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
                log.info('Pulling image %s...' % container_options['image'])
                output = self.client.pull(
                    container_options['image'],
                    stream=True,
                    insecure_registry=insecure_registry
                )
                stream_output(output, sys.stdout)
                return Container.create(self.client, **container_options)
            raise

    def recreate_containers(self, insecure_registry=False, do_build=True, **override_options):
        """
        If a container for this service doesn't exist, create and start one. If there are
        any, stop them, create+start new ones, and remove the old containers.
        """
        containers = self.containers(stopped=True)
        if not containers:
            log.info("Creating %s..." % self._next_container_name(containers))
            container = self.create_container(
                insecure_registry=insecure_registry,
                do_build=do_build,
                **override_options)
            self.start_container(container)
            return [(None, container)]
        else:
            tuples = []

            for c in containers:
                log.info("Recreating %s..." % c.name)
                tuples.append(self.recreate_container(c, insecure_registry=insecure_registry, **override_options))

            return tuples

    def recreate_container(self, container, **override_options):
        """Recreate a container. An intermediate container is created so that
        the new container has the same name, while still supporting
        `volumes-from` the original container.
        """
        try:
            container.stop()
        except APIError as e:
            if (e.response.status_code == 500
                    and e.explanation
                    and 'no such process' in str(e.explanation)):
                pass
            else:
                raise

        intermediate_container = Container.create(
            self.client,
            image=container.image,
            entrypoint=['/bin/echo'],
            command=[],
            detach=True,
            host_config=create_host_config(volumes_from=[container.id]),
        )
        intermediate_container.start()
        intermediate_container.wait()
        container.remove()

        options = dict(override_options)
        new_container = self.create_container(
            do_build=False,
            intermediate_container=intermediate_container,
            **options
        )
        self.start_container(new_container)

        intermediate_container.remove()

        return (intermediate_container, new_container)

    def start_container_if_stopped(self, container, **options):
        if container.is_running:
            return container
        else:
            log.info("Starting %s..." % container.name)
            return self.start_container(container, **options)

    def start_container(self, container, **options):
        container.start(**options)
        return container

    def start_or_create_containers(
            self,
            insecure_registry=False,
            detach=False,
            do_build=True):
        containers = self.containers(stopped=True)

        if not containers:
            log.info("Creating %s..." % self._next_container_name(containers))
            new_container = self.create_container(
                insecure_registry=insecure_registry,
                detach=detach,
                do_build=do_build,
            )
            return [self.start_container(new_container)]
        else:
            return [self.start_container_if_stopped(c) for c in containers]

    def get_linked_names(self):
        return [s.name for (s, _) in self.links]

    def get_volumes_from_names(self):
        return [s.name for s in self.volumes_from if isinstance(s, Service)]

    def get_net_name(self):
        if isinstance(self.net, Service):
            return self.net.name
        else:
            return

    def _next_container_name(self, all_containers, one_off=False):
        bits = [self.project, self.name]
        if one_off:
            bits.append('run')
        return '_'.join(bits + [str(self._next_container_number(all_containers))])

    def _next_container_number(self, all_containers):
        numbers = [parse_name(c.name).number for c in all_containers]
        return 1 if not numbers else max(numbers) + 1

    def _get_links(self, link_to_self):
        links = []
        for service, link_name in self.links:
            for container in service.containers():
                links.append((container.name, link_name or service.name))
                links.append((container.name, container.name))
                links.append((container.name, container.name_without_project))
        if link_to_self:
            for container in self.containers():
                links.append((container.name, self.name))
                links.append((container.name, container.name))
                links.append((container.name, container.name_without_project))
        for external_link in self.external_links:
            if ':' not in external_link:
                link_name = external_link
            else:
                external_link, link_name = external_link.split(':')
            links.append((external_link, link_name))
        return links

    def _get_volumes_from(self, intermediate_container=None):
        volumes_from = []
        for volume_source in self.volumes_from:
            if isinstance(volume_source, Service):
                containers = volume_source.containers(stopped=True)
                if not containers:
                    volumes_from.append(volume_source.create_container().id)
                else:
                    volumes_from.extend(map(attrgetter('id'), containers))
            elif isinstance(volume_source, Container):
                volumes_from.append(volume_source.id)

        if intermediate_container:
            volumes_from.append(intermediate_container.id)

        return volumes_from

    def _get_net(self):
        if not self.net:
            return "bridge"

        if isinstance(self.net, Service):
            containers = self.net.containers()
            if len(containers) > 0:
                net = 'container:' + containers[0].id
            else:
                log.warning("Warning: Service %s is trying to reuse the network stack "
                            "of another service that is not running." % (self.net.name))
                net = None
        elif isinstance(self.net, Container):
            net = 'container:' + self.net.id
        else:
            net = self.net

        return net

    def _get_container_create_options(self, override_options, one_off=False, intermediate_container=None):
        container_options = dict(
            (k, self.options[k])
            for k in DOCKER_CONFIG_KEYS if k in self.options)
        container_options.update(override_options)

        container_options['name'] = self._next_container_name(
            self.containers(stopped=True, one_off=one_off),
            one_off)

        # If a qualified hostname was given, split it into an
        # unqualified hostname and a domainname unless domainname
        # was also given explicitly. This matches the behavior of
        # the official Docker CLI in that scenario.
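        # For example, a hostname of "web.example.com" with no explicit
        # domainname becomes hostname "web" and domainname "example.com".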
        if ('hostname' in container_options
                and 'domainname' not in container_options
                and '.' in container_options['hostname']):
            parts = container_options['hostname'].partition('.')
            container_options['hostname'] = parts[0]
            container_options['domainname'] = parts[2]

        if 'ports' in container_options or 'expose' in self.options:
            ports = []
            all_ports = container_options.get('ports', []) + self.options.get('expose', [])
            for port in all_ports:
                port = str(port)
                if ':' in port:
                    port = port.split(':')[-1]
                if '/' in port:
                    port = tuple(port.split('/'))
                ports.append(port)
            container_options['ports'] = ports

        if 'volumes' in container_options:
            container_options['volumes'] = dict(
                (parse_volume_spec(v).internal, {})
                for v in container_options['volumes'])

        if self.can_be_built():
            container_options['image'] = self.full_name
        else:
            container_options['image'] = self._get_image_name(container_options['image'])

        # Delete options which are only used when starting
        for key in DOCKER_START_KEYS:
            container_options.pop(key, None)

        container_options['host_config'] = self._get_container_host_config(override_options, one_off=one_off, intermediate_container=intermediate_container)

        return container_options

    def _get_container_host_config(self, override_options, one_off=False, intermediate_container=None):
        options = dict(self.options, **override_options)

        port_bindings = build_port_bindings(options.get('ports') or [])

        volume_bindings = dict(
            build_volume_binding(parse_volume_spec(volume))
            for volume in options.get('volumes') or []
            if ':' in volume)

        privileged = options.get('privileged', False)
        cap_add = options.get('cap_add', None)
        cap_drop = options.get('cap_drop', None)

        dns = options.get('dns', None)
        if isinstance(dns, six.string_types):
            dns = [dns]

        dns_search = options.get('dns_search', None)
        if isinstance(dns_search, six.string_types):
            dns_search = [dns_search]

        restart = parse_restart_spec(options.get('restart', None))

        return create_host_config(
            links=self._get_links(link_to_self=one_off),
            port_bindings=port_bindings,
            binds=volume_bindings,
            volumes_from=self._get_volumes_from(intermediate_container),
            privileged=privileged,
            network_mode=self._get_net(),
            dns=dns,
            dns_search=dns_search,
            restart_policy=restart,
            cap_add=cap_add,
            cap_drop=cap_drop,
        )

    def _get_image_name(self, image):
        repo, tag = parse_repository_tag(image)
        if tag == "":
            tag = "latest"
        return '%s:%s' % (repo, tag)

    def build(self, no_cache=False):
        log.info('Building %s...' % self.name)

        build_output = self.client.build(
            self.options['build'],
            tag=self.full_name,
            stream=True,
            rm=True,
            nocache=no_cache,
        )

        try:
            all_events = stream_output(build_output, sys.stdout)
        except StreamOutputError as e:
            raise BuildError(self, six.text_type(e))

        image_id = None

        for event in all_events:
            if 'stream' in event:
                match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
                if match:
                    image_id = match.group(1)

        if image_id is None:
            raise BuildError(self, event if all_events else 'Unknown')

        return image_id

    def can_be_built(self):
        return 'build' in self.options

    @property
    def full_name(self):
        """
        The tag to give to images built for this service.
        """
        return '%s_%s' % (self.project, self.name)

    def can_be_scaled(self):
        for port in self.options.get('ports', []):
            if ':' in str(port):
                return False
        return True

    def pull(self, insecure_registry=False):
        if 'image' in self.options:
            image_name = self._get_image_name(self.options['image'])
            log.info('Pulling %s (%s)...' % (self.name, image_name))
            self.client.pull(
                image_name,
                insecure_registry=insecure_registry
            )
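

# Container names produced by _next_container_name and parsed below look like
# "myproject_web_1", or "myproject_web_run_1" for one-off containers; for
# example, parse_name('myproject_web_1') returns ServiceName('myproject', 'web', 1).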
NAME_RE = re.compile(r'^([^_]+)_([^_]+)_(run_)?(\d+)$')


def is_valid_name(name, one_off=False):
    match = NAME_RE.match(name)
    if match is None:
        return False
    if one_off:
        return match.group(3) == 'run_'
    else:
        return match.group(3) is None


def parse_name(name):
    match = NAME_RE.match(name)
    (project, service_name, _, suffix) = match.groups()
    return ServiceName(project, service_name, int(suffix))


def parse_restart_spec(restart_config):
    if not restart_config:
        return None
    parts = restart_config.split(':')
    if len(parts) > 2:
        raise ConfigError("Restart %s has incorrect format, should be "
                          "mode[:max_retry]" % restart_config)
    if len(parts) == 2:
        name, max_retry_count = parts
    else:
        name, = parts
        max_retry_count = 0

    return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
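

# Illustrative mappings for parse_volume_spec (paths are hypothetical):
#   '/var/data'           -> VolumeSpec(None, '/var/data', 'rw')
#   '/host:/container'    -> VolumeSpec('/host', '/container', 'rw')
#   '/host:/container:ro' -> VolumeSpec('/host', '/container', 'ro')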
def parse_volume_spec(volume_config):
    parts = volume_config.split(':')
    if len(parts) > 3:
        raise ConfigError("Volume %s has incorrect format, should be "
                          "external:internal[:mode]" % volume_config)

    if len(parts) == 1:
        return VolumeSpec(None, parts[0], 'rw')

    if len(parts) == 2:
        parts.append('rw')

    external, internal, mode = parts
    if mode not in ('rw', 'ro'):
        raise ConfigError("Volume %s has invalid mode (%s), should be "
                          "one of: rw, ro." % (volume_config, mode))

    return VolumeSpec(external, internal, mode)
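

# Illustrative mappings for parse_repository_tag: 'ubuntu' yields ('ubuntu', ''),
# 'ubuntu:14.04' yields ('ubuntu', '14.04'), and a registry address with no tag
# such as 'localhost:5000/ubuntu' yields ('localhost:5000/ubuntu', '') because
# the text after the last ':' contains a '/'.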
def parse_repository_tag(s):
    if ":" not in s:
        return s, ""
    repo, tag = s.rsplit(":", 1)
    if "/" in tag:
        return s, ""
    return repo, tag
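

# For example, build_volume_binding(VolumeSpec('/host', '/container', 'ro'))
# returns ('/host', {'bind': '/container', 'ro': True}); _get_container_host_config
# collects these pairs into the dict it passes as `binds`.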
def build_volume_binding(volume_spec):
    internal = {'bind': volume_spec.internal, 'ro': volume_spec.mode == 'ro'}
    return volume_spec.external, internal


def build_port_bindings(ports):
    port_bindings = {}
    for port in ports:
        internal_port, external = split_port(port)
        if internal_port in port_bindings:
            port_bindings[internal_port].append(external)
        else:
            port_bindings[internal_port] = [external]
    return port_bindings
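

# Illustrative mappings for split_port (addresses and ports are hypothetical):
#   '8000'                -> ('8000', None)
#   '8001:8000'           -> ('8000', '8001')
#   '127.0.0.1:8001:8000' -> ('8000', ('127.0.0.1', '8001'))
#   '127.0.0.1::8000'     -> ('8000', ('127.0.0.1', None))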
def split_port(port):
    parts = str(port).split(':')
    if not 1 <= len(parts) <= 3:
        raise ConfigError('Invalid port "%s", should be '
                          '[[remote_ip:]remote_port:]port[/protocol]' % port)

    if len(parts) == 1:
        internal_port, = parts
        return internal_port, None
    if len(parts) == 2:
        external_port, internal_port = parts
        return internal_port, external_port

    external_ip, external_port, internal_port = parts
    return internal_port, (external_ip, external_port or None)