from __future__ import unicode_literals
from __future__ import absolute_import
from .packages.docker.client import APIError
import logging
import re
import os
import sys
import json
from .container import Container

log = logging.getLogger(__name__)

DOCKER_CONFIG_KEYS = [
    'image', 'command', 'hostname', 'user', 'detach', 'stdin_open', 'tty',
    'mem_limit', 'ports', 'environment', 'dns', 'volumes', 'volumes_from',
    'entrypoint', 'privileged',
]
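
# Maps commonly mistyped or singular config keys to the supported option
# names, so config errors can suggest "did you mean '...'?" below.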
DOCKER_CONFIG_HINTS = {
    'link'      : 'links',
    'port'      : 'ports',
    'privilege' : 'privileged',
    'priviliged': 'privileged',
    'privilige' : 'privileged',
    'volume'    : 'volumes',
}


class BuildError(Exception):
    def __init__(self, service):
        self.service = service


class CannotBeScaledError(Exception):
    pass


class ConfigError(ValueError):
    pass
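

# A Service manages the set of containers for one named service within a
# project. It is configured either from an existing image or from a build
# path, plus any of the options listed in DOCKER_CONFIG_KEYS.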
class Service(object):
    def __init__(self, name, client=None, project='default', links=[], **options):
        if not re.match('^[a-zA-Z0-9]+$', name):
            raise ConfigError('Invalid name: %s' % name)
        if not re.match('^[a-zA-Z0-9]+$', project):
            raise ConfigError('Invalid project: %s' % project)
        if 'image' in options and 'build' in options:
            raise ConfigError('Service %s has both an image and build path specified. A service can either be built from a build path or use an existing image, not both.' % name)

        supported_options = DOCKER_CONFIG_KEYS + ['build', 'expose']

        for k in options:
            if k not in supported_options:
                msg = "Unsupported config option for %s service: '%s'" % (name, k)
                if k in DOCKER_CONFIG_HINTS:
                    msg += " (did you mean '%s'?)" % DOCKER_CONFIG_HINTS[k]
                raise ConfigError(msg)

        self.name = name
        self.client = client
        self.project = project
        self.links = links or []
        self.options = options
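
    # Containers are matched to their service purely by name: every container
    # created here is named <project>_<service>_<number> (see NAME_RE below),
    # so containers() can filter the full container list down to this service.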
    def containers(self, stopped=False, one_off=False):
        l = []
        for container in self.client.containers(all=stopped):
            name = get_container_name(container)
            if not name or not is_valid_name(name, one_off):
                continue
            project, name, number = parse_name(name)
            if project == self.project and name == self.name:
                l.append(Container.from_ps(self.client, container))
        return l

    def start(self, **options):
        for c in self.containers(stopped=True):
            if not c.is_running:
                log.info("Starting %s..." % c.name)
                self.start_container(c, **options)

    def stop(self, **options):
        for c in self.containers():
            log.info("Stopping %s..." % c.name)
            c.stop(**options)

    def kill(self, **options):
        for c in self.containers():
            log.info("Killing %s..." % c.name)
            c.kill(**options)

    def scale(self, desired_num):
        """
        Adjusts the number of containers to the specified number and ensures they are running.

        - creates containers until there are at least `desired_num`
        - stops containers until there are at most `desired_num` running
        - starts containers until there are at least `desired_num` running
        - removes all stopped containers
        """
        if not self.can_be_scaled():
            raise CannotBeScaledError()

        # Create enough containers
        containers = self.containers(stopped=True)
        while len(containers) < desired_num:
            containers.append(self.create_container())

        running_containers = []
        stopped_containers = []
        for c in containers:
            if c.is_running:
                running_containers.append(c)
            else:
                stopped_containers.append(c)
        running_containers.sort(key=lambda c: c.number)
        stopped_containers.sort(key=lambda c: c.number)

        # Stop containers
        while len(running_containers) > desired_num:
            c = running_containers.pop()
            log.info("Stopping %s..." % c.name)
            c.stop(timeout=1)
            stopped_containers.append(c)

        # Start containers
        while len(running_containers) < desired_num:
            c = stopped_containers.pop(0)
            log.info("Starting %s..." % c.name)
            self.start_container(c)
            running_containers.append(c)

        self.remove_stopped()

    def remove_stopped(self, **options):
        for c in self.containers(stopped=True):
            if not c.is_running:
                log.info("Removing %s..." % c.name)
                c.remove(**options)

    def create_container(self, one_off=False, **override_options):
        """
        Create a container for this service. If the image doesn't exist,
        attempt to pull it.
        """
        container_options = self._get_container_create_options(override_options, one_off=one_off)
        try:
            return Container.create(self.client, **container_options)
        except APIError as e:
            if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
                log.info('Pulling image %s...' % container_options['image'])
                output = self.client.pull(container_options['image'], stream=True)
                stream_output(output, sys.stdout)
                return Container.create(self.client, **container_options)
            raise

    def recreate_containers(self, **override_options):
        """
        If no container for this service exists, create and start one.
        Otherwise, stop the existing containers, create and start
        replacements, and remove the old containers.
        """
        containers = self.containers(stopped=True)
        if len(containers) == 0:
            log.info("Creating %s..." % self.next_container_name())
            container = self.create_container(**override_options)
            self.start_container(container)
            return [(None, container)]
        else:
            tuples = []
            for c in containers:
                log.info("Recreating %s..." % c.name)
                tuples.append(self.recreate_container(c, **override_options))
            return tuples
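
    # Replacing a container must not lose the data in its volumes. The old
    # container's volumes are kept alive by a throwaway "intermediate"
    # container created with volumes_from=<old container>; the new container
    # is then started with volumes_from=<intermediate>, so the volumes are
    # carried across the recreate.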
    def recreate_container(self, container, **override_options):
        if container.is_running:
            container.stop(timeout=1)

        intermediate_container = Container.create(
            self.client,
            image=container.image,
            volumes_from=container.id,
            entrypoint=['echo'],
            command=[],
        )
        intermediate_container.start(volumes_from=container.id)
        intermediate_container.wait()
        container.remove()

        options = dict(override_options)
        options['volumes_from'] = intermediate_container.id
        new_container = self.create_container(**options)
        self.start_container(new_container, volumes_from=intermediate_container.id)

        intermediate_container.remove()

        return (intermediate_container, new_container)
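
    # 'ports' entries may be "PORT" or "HOST:CONTAINER" strings, and 'volumes'
    # entries may be "CONTAINER_PATH" or "HOST_PATH:CONTAINER_PATH" strings;
    # start_container() splits them into the port and volume bindings passed
    # to container.start().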
    def start_container(self, container=None, volumes_from=None, **override_options):
        if container is None:
            container = self.create_container(**override_options)

        options = self.options.copy()
        options.update(override_options)

        port_bindings = {}
        if options.get('ports', None) is not None:
            for port in options['ports']:
                port = str(port)
                if ':' in port:
                    external_port, internal_port = port.split(':', 1)
                else:
                    external_port, internal_port = (None, port)
                port_bindings[internal_port] = external_port

        volume_bindings = {}
        if options.get('volumes', None) is not None:
            for volume in options['volumes']:
                if ':' in volume:
                    external_dir, internal_dir = volume.split(':')
                    volume_bindings[os.path.abspath(external_dir)] = internal_dir

        privileged = options.get('privileged', False)

        container.start(
            links=self._get_links(link_to_self=override_options.get('one_off', False)),
            port_bindings=port_bindings,
            binds=volume_bindings,
            volumes_from=volumes_from,
            privileged=privileged,
        )
        return container

    def next_container_name(self, one_off=False):
        bits = [self.project, self.name]
        if one_off:
            bits.append('run')
        return '_'.join(bits + [str(self.next_container_number(one_off=one_off))])

    def next_container_number(self, one_off=False):
        numbers = [parse_name(c.name)[2] for c in self.containers(stopped=True, one_off=one_off)]

        if len(numbers) == 0:
            return 1
        else:
            return max(numbers) + 1

    def _get_links(self, link_to_self):
        links = []
        for service, link_name in self.links:
            for container in service.containers():
                if link_name:
                    links.append((container.name, link_name))
                links.append((container.name, container.name))
                links.append((container.name, container.name_without_project))
        if link_to_self:
            for container in self.containers():
                links.append((container.name, container.name))
                links.append((container.name, container.name_without_project))
        return links
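
    # The options passed to "create" are a normalised copy of self.options:
    # ports are reduced to their container-side port (host-side mappings are
    # applied later, at start time), volumes are keyed by container path, and
    # 'privileged' is dropped because it only applies when starting.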
    def _get_container_create_options(self, override_options, one_off=False):
        container_options = dict((k, self.options[k]) for k in DOCKER_CONFIG_KEYS if k in self.options)
        container_options.update(override_options)

        container_options['name'] = self.next_container_name(one_off)

        if 'ports' in container_options or 'expose' in self.options:
            ports = []
            all_ports = container_options.get('ports', []) + self.options.get('expose', [])
            for port in all_ports:
                port = str(port)
                if ':' in port:
                    port = port.split(':')[-1]
                if '/' in port:
                    port = tuple(port.split('/'))
                ports.append(port)
            container_options['ports'] = ports

        if 'volumes' in container_options:
            container_options['volumes'] = dict((split_volume(v)[1], {}) for v in container_options['volumes'])

        if self.can_be_built():
            if len(self.client.images(name=self._build_tag_name())) == 0:
                self.build()
            container_options['image'] = self._build_tag_name()

        # Privileged is only required for starting containers, not for creating them
        if 'privileged' in container_options:
            del container_options['privileged']

        return container_options

    def build(self):
        log.info('Building %s...' % self.name)

        build_output = self.client.build(
            self.options['build'],
            tag=self._build_tag_name(),
            stream=True
        )

        all_events = stream_output(build_output, sys.stdout)

        image_id = None
        for event in all_events:
            if 'stream' in event:
                match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
                if match:
                    image_id = match.group(1)

        if image_id is None:
            raise BuildError(self)

        return image_id

    def can_be_built(self):
        return 'build' in self.options

    def _build_tag_name(self):
        """
        The tag to give to images built for this service.
        """
        return '%s_%s' % (self.project, self.name)
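
    # A service that maps a fixed host port (a "HOST:CONTAINER" entry) can
    # only run one container at a time, since a second container could not
    # bind the same host port, so such a service cannot be scaled.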
    def can_be_scaled(self):
        for port in self.options.get('ports', []):
            if ':' in str(port):
                return False
        return True
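

# stream_output() renders the JSON progress stream produced by the Docker
# build and pull endpoints. When writing to a terminal it keeps one line per
# image layer and rewrites progress updates in place using ANSI cursor moves.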
def stream_output(output, stream):
    is_terminal = hasattr(stream, 'fileno') and os.isatty(stream.fileno())
    all_events = []
    lines = {}
    diff = 0

    for chunk in output:
        event = json.loads(chunk)
        all_events.append(event)

        if 'progress' in event or 'progressDetail' in event:
            image_id = event['id']

            if image_id in lines:
                diff = len(lines) - lines[image_id]
            else:
                lines[image_id] = len(lines)
                stream.write("\n")
                diff = 0

            if is_terminal:
                # move cursor up `diff` rows
                stream.write("%c[%dA" % (27, diff))

        try:
            print_output_event(event, stream, is_terminal)
        except Exception:
            stream.write(repr(event) + "\n")
            raise

        if 'id' in event and is_terminal:
            # move cursor back down
            stream.write("%c[%dB" % (27, diff))

        stream.flush()

    return all_events


def print_output_event(event, stream, is_terminal):
    if 'errorDetail' in event:
        raise Exception(event['errorDetail']['message'])

    terminator = ''

    if is_terminal and 'stream' not in event:
        # erase current line
        stream.write("%c[2K\r" % 27)
        terminator = "\r"
    elif 'progressDetail' in event:
        return

    if 'time' in event:
        stream.write("[%s] " % event['time'])

    if 'id' in event:
        stream.write("%s: " % event['id'])

    if 'from' in event:
        stream.write("(from %s) " % event['from'])

    status = event.get('status', '')

    if 'progress' in event:
        stream.write("%s %s%s" % (status, event['progress'], terminator))
    elif 'progressDetail' in event:
        detail = event['progressDetail']
        if 'current' in detail:
            percentage = float(detail['current']) / float(detail['total']) * 100
            stream.write('%s (%.1f%%)%s' % (status, percentage, terminator))
        else:
            stream.write('%s%s' % (status, terminator))
    elif 'stream' in event:
        stream.write("%s%s" % (event['stream'], terminator))
    else:
        stream.write("%s%s\n" % (status, terminator))
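

# Container names are of the form <project>_<service>_<number>, with an extra
# "run_" segment before the number for one-off containers. Project and service
# names themselves cannot contain underscores (see the Service constructor).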
NAME_RE = re.compile(r'^([^_]+)_([^_]+)_(run_)?(\d+)$')


def is_valid_name(name, one_off=False):
    match = NAME_RE.match(name)
    if match is None:
        return False
    if one_off:
        return match.group(3) == 'run_'
    else:
        return match.group(3) is None


def parse_name(name, one_off=False):
    match = NAME_RE.match(name)
    (project, service_name, _, suffix) = match.groups()
    return (project, service_name, int(suffix))
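

# Containers from the list endpoint ("ps") carry a 'Names' list, where names
# with extra path segments (such as link aliases) are skipped, while inspect
# output carries a single 'Name'; get_container_name() handles both shapes.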
def get_container_name(container):
    if not container.get('Name') and not container.get('Names'):
        return None
    # inspect
    if 'Name' in container:
        return container['Name']
    # ps
    for name in container['Names']:
        if len(name.split('/')) == 2:
            return name[1:]


def split_volume(v):
    """
    If v is of the format EXTERNAL:INTERNAL, returns (EXTERNAL, INTERNAL).
    If v is of the format INTERNAL, returns (None, INTERNAL).
    """
    if ':' in v:
        return tuple(v.split(':', 1))
    else:
        return (None, v)
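
# e.g. split_volume('/host/logs:/var/log') == ('/host/logs', '/var/log')
#      split_volume('/var/log') == (None, '/var/log')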