service.py

from __future__ import unicode_literals
from __future__ import absolute_import
from .packages.docker.client import APIError
import logging
import re
import os
import sys
import json
from .container import Container

log = logging.getLogger(__name__)


DOCKER_CONFIG_KEYS = ['image', 'command', 'hostname', 'user', 'detach', 'stdin_open', 'tty', 'mem_limit', 'ports', 'environment', 'dns', 'volumes', 'volumes_from', 'entrypoint', 'privileged']
DOCKER_CONFIG_HINTS = {
    'link'      : 'links',
    'port'      : 'ports',
    'privilege' : 'privileged',
    'priviliged': 'privileged',
    'privilige' : 'privileged',
    'volume'    : 'volumes',
}
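
# Illustrative sketch (not part of the original flow): given the hints above,
# a config with the misspelled key 'port' for a hypothetical service 'web'
# would fail validation in Service.__init__ with a message like:
#
#   Unsupported config option for web service: 'port' (did you mean 'ports'?)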


class BuildError(Exception):
    def __init__(self, service):
        self.service = service


class CannotBeScaledError(Exception):
    pass


class ConfigError(ValueError):
    pass


class Service(object):
    def __init__(self, name, client=None, project='default', links=[], **options):
        if not re.match('^[a-zA-Z0-9]+$', name):
            raise ConfigError('Invalid name: %s' % name)
        if not re.match('^[a-zA-Z0-9]+$', project):
            raise ConfigError('Invalid project: %s' % project)
        if 'image' in options and 'build' in options:
            raise ConfigError('Service %s has both an image and build path specified. A service can either be built to image or use an existing image, not both.' % name)

        supported_options = DOCKER_CONFIG_KEYS + ['build', 'expose']

        for k in options:
            if k not in supported_options:
                msg = "Unsupported config option for %s service: '%s'" % (name, k)
                if k in DOCKER_CONFIG_HINTS:
                    msg += " (did you mean '%s'?)" % DOCKER_CONFIG_HINTS[k]
                raise ConfigError(msg)

        self.name = name
        self.client = client
        self.project = project
        self.links = links or []
        self.options = options

    def containers(self, stopped=False, one_off=False):
        l = []
        for container in self.client.containers(all=stopped):
            name = get_container_name(container)
            if not name or not is_valid_name(name, one_off):
                continue
            project, name, number = parse_name(name)
            if project == self.project and name == self.name:
                l.append(Container.from_ps(self.client, container))
        return l

    def start(self, **options):
        for c in self.containers(stopped=True):
            if not c.is_running:
                log.info("Starting %s..." % c.name)
                self.start_container(c, **options)

    def stop(self, **options):
        for c in self.containers():
            log.info("Stopping %s..." % c.name)
            c.stop(**options)

    def kill(self, **options):
        for c in self.containers():
            log.info("Killing %s..." % c.name)
            c.kill(**options)

    def scale(self, desired_num):
        """
        Adjusts the number of containers to the specified number and ensures they are running.

        - creates containers until there are at least `desired_num`
        - stops containers until there are at most `desired_num` running
        - starts containers until there are at least `desired_num` running
        - removes all stopped containers
        """
        if not self.can_be_scaled():
            raise CannotBeScaledError()

        # Create enough containers
        containers = self.containers(stopped=True)
        while len(containers) < desired_num:
            containers.append(self.create_container())

        running_containers = []
        stopped_containers = []
        for c in containers:
            if c.is_running:
                running_containers.append(c)
            else:
                stopped_containers.append(c)
        running_containers.sort(key=lambda c: c.number)
        stopped_containers.sort(key=lambda c: c.number)

        # Stop containers
        while len(running_containers) > desired_num:
            c = running_containers.pop()
            log.info("Stopping %s..." % c.name)
            c.stop(timeout=1)
            stopped_containers.append(c)

        # Start containers
        while len(running_containers) < desired_num:
            c = stopped_containers.pop(0)
            log.info("Starting %s..." % c.name)
            self.start_container(c)
            running_containers.append(c)

        self.remove_stopped()
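
    # Usage sketch (hypothetical names, assuming a configured Docker client):
    #
    #   web = Service('web', client=client, project='myapp', image='ubuntu')
    #   web.scale(3)   # ends with exactly 3 running containers and no stopped ones
    #
    # Scaling refuses to run when a host port is bound (see can_be_scaled below),
    # because multiple containers cannot share the same host port.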

    def remove_stopped(self, **options):
        for c in self.containers(stopped=True):
            if not c.is_running:
                log.info("Removing %s..." % c.name)
                c.remove(**options)

    def create_container(self, one_off=False, **override_options):
        """
        Create a container for this service. If the image doesn't exist, attempt to pull
        it.
        """
        container_options = self._get_container_create_options(override_options, one_off=one_off)
        try:
            return Container.create(self.client, **container_options)
        except APIError as e:
            if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
                log.info('Pulling image %s...' % container_options['image'])
                output = self.client.pull(container_options['image'], stream=True)
                stream_output(output, sys.stdout)
                return Container.create(self.client, **container_options)
            raise
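
    # Sketch of the pull-on-miss behaviour above (hypothetical image name):
    # if Container.create() fails with a 404 "No such image" APIError, the image
    # is pulled and creation is retried once, e.g.
    #
    #   db = Service('db', client=client, project='myapp', image='postgres')
    #   db.create_container()   # pulls 'postgres' first if it is absent locally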

    def recreate_containers(self, **override_options):
        """
        If a container for this service doesn't exist, create one. If there are
        any, stop them and create new ones. Does not remove the old containers.
        """
        containers = self.containers(stopped=True)

        if len(containers) == 0:
            log.info("Creating %s..." % self.next_container_name())
            return ([], [self.create_container(**override_options)])
        else:
            old_containers = []
            new_containers = []

            for c in containers:
                log.info("Recreating %s..." % c.name)
                (old_container, new_container) = self.recreate_container(c, **override_options)
                old_containers.append(old_container)
                new_containers.append(new_container)

            return (old_containers, new_containers)

    def recreate_container(self, container, **override_options):
        if container.is_running:
            container.stop(timeout=1)

        intermediate_container = Container.create(
            self.client,
            image=container.image,
            volumes_from=container.id,
            entrypoint=['echo'],
            command=[],
        )
        intermediate_container.start()
        intermediate_container.wait()
        container.remove()

        options = dict(override_options)
        options['volumes_from'] = intermediate_container.id
        new_container = self.create_container(**options)

        return (intermediate_container, new_container)
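
    # The intermediate container above is a data-only container: it runs a no-op
    # entrypoint ('echo') while holding volumes_from the old container, so the old
    # container can be removed without losing its volumes. The replacement is then
    # created with volumes_from the intermediate container, e.g. (hypothetical):
    #
    #   old, new = service.recreate_container(existing)
    #   # old is the throwaway volume holder, new is the fresh container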

    def start_container(self, container=None, **override_options):
        if container is None:
            container = self.create_container(**override_options)

        options = self.options.copy()
        options.update(override_options)

        port_bindings = {}

        if options.get('ports', None) is not None:
            for port in options['ports']:
                port = str(port)
                if ':' in port:
                    external_port, internal_port = port.split(':', 1)
                else:
                    external_port, internal_port = (None, port)
                port_bindings[internal_port] = external_port

        volume_bindings = {}

        if options.get('volumes', None) is not None:
            for volume in options['volumes']:
                if ':' in volume:
                    external_dir, internal_dir = volume.split(':')
                    volume_bindings[os.path.abspath(external_dir)] = internal_dir

        privileged = options.get('privileged', False)

        container.start(
            links=self._get_links(link_to_self=override_options.get('one_off', False)),
            port_bindings=port_bindings,
            binds=volume_bindings,
            privileged=privileged,
        )
        return container
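
    # How the bindings above are derived (illustrative values only):
    #
    #   ports:   ["8000:8000", "9000"]  ->  port_bindings  {"8000": "8000", "9000": None}
    #   volumes: ["./data:/var/data"]   ->  binds          {"/abs/path/data": "/var/data"}
    #
    # A port without a colon is exposed with no fixed host port; a volume without
    # a colon is container-only and produces no host bind.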

    def next_container_name(self, one_off=False):
        bits = [self.project, self.name]
        if one_off:
            bits.append('run')
        return '_'.join(bits + [str(self.next_container_number(one_off=one_off))])

    def next_container_number(self, one_off=False):
        numbers = [parse_name(c.name)[2] for c in self.containers(stopped=True, one_off=one_off)]

        if len(numbers) == 0:
            return 1
        else:
            return max(numbers) + 1
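
    # Container names follow <project>_<service>_<number>, with an extra 'run'
    # segment for one-off containers, e.g. (hypothetical project/service):
    #
    #   myapp_web_1        first container for service 'web' in project 'myapp'
    #   myapp_web_run_1    first one-off container for the same service
    #
    # Numbers are allocated as max(existing) + 1, so gaps are not reused.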

    def _get_links(self, link_to_self):
        links = []
        for service, link_name in self.links:
            for container in service.containers():
                if link_name:
                    links.append((container.name, link_name))
                links.append((container.name, container.name))
                links.append((container.name, container.name_without_project))
        if link_to_self:
            for container in self.containers():
                links.append((container.name, container.name))
                links.append((container.name, container.name_without_project))
        return links
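
    # Each linked container is exposed under up to three aliases: the explicit
    # link name (if any), its full name, and its name without the project prefix.
    # For a link to a 'db' service with alias 'database' this might yield
    # (illustrative values, assuming name_without_project strips the project):
    #
    #   [("myapp_db_1", "database"), ("myapp_db_1", "myapp_db_1"), ("myapp_db_1", "db_1")]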

    def _get_container_create_options(self, override_options, one_off=False):
        container_options = dict((k, self.options[k]) for k in DOCKER_CONFIG_KEYS if k in self.options)
        container_options.update(override_options)

        container_options['name'] = self.next_container_name(one_off)

        if 'ports' in container_options or 'expose' in self.options:
            ports = []
            all_ports = container_options.get('ports', []) + self.options.get('expose', [])
            for port in all_ports:
                port = str(port)
                if ':' in port:
                    port = port.split(':')[-1]
                if '/' in port:
                    port = tuple(port.split('/'))
                ports.append(port)
            container_options['ports'] = ports

        if 'volumes' in container_options:
            container_options['volumes'] = dict((split_volume(v)[1], {}) for v in container_options['volumes'])

        if self.can_be_built():
            if len(self.client.images(name=self._build_tag_name())) == 0:
                self.build()
            container_options['image'] = self._build_tag_name()

        # Privileged is only required for starting containers, not for creating them
        if 'privileged' in container_options:
            del container_options['privileged']

        return container_options
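
    # Port normalisation above keeps only the container-side port at create time
    # (the host side is applied later in start_container), e.g. (illustrative):
    #
    #   ports: ["8000:8000", "53/udp"], expose: [9000]  ->  ["8000", ("53", "udp"), "9000"]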

    def build(self):
        log.info('Building %s...' % self.name)

        build_output = self.client.build(
            self.options['build'],
            tag=self._build_tag_name(),
            stream=True
        )

        all_events = stream_output(build_output, sys.stdout)

        image_id = None

        for event in all_events:
            if 'stream' in event:
                match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
                if match:
                    image_id = match.group(1)

        if image_id is None:
            raise BuildError(self)

        return image_id

    def can_be_built(self):
        return 'build' in self.options

    def _build_tag_name(self):
        """
        The tag to give to images built for this service.
        """
        return '%s_%s' % (self.project, self.name)

    def can_be_scaled(self):
        for port in self.options.get('ports', []):
            if ':' in str(port):
                return False
        return True
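
    # Example: a service with "8000:8000" in its ports cannot be scaled, since
    # every copy would try to bind the same host port; a bare "8000" (no host
    # binding) scales fine.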


def stream_output(output, stream):
    is_terminal = hasattr(stream, 'fileno') and os.isatty(stream.fileno())
    all_events = []
    lines = {}
    diff = 0

    for chunk in output:
        event = json.loads(chunk)
        all_events.append(event)

        if 'progress' in event or 'progressDetail' in event:
            image_id = event['id']

            if image_id in lines:
                diff = len(lines) - lines[image_id]
            else:
                lines[image_id] = len(lines)
                stream.write("\n")
                diff = 0

            if is_terminal:
                # move cursor up `diff` rows
                stream.write("%c[%dA" % (27, diff))

        try:
            print_output_event(event, stream, is_terminal)
        except Exception:
            stream.write(repr(event) + "\n")
            raise

        if 'id' in event and is_terminal:
            # move cursor back down
            stream.write("%c[%dB" % (27, diff))

        stream.flush()

    return all_events
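
# stream_output() multiplexes per-layer pull/build progress on one terminal:
# each new image id gets its own line, and ANSI escape sequences (ESC[<n>A and
# ESC[<n>B, written as "%c[%dA" % (27, diff)) move the cursor up to that line
# and back down so progress updates overwrite in place. When the stream is not
# a TTY, no cursor movement happens and events are printed line by line.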


def print_output_event(event, stream, is_terminal):
    if 'errorDetail' in event:
        raise Exception(event['errorDetail']['message'])

    terminator = ''

    if is_terminal and 'stream' not in event:
        # erase current line
        stream.write("%c[2K\r" % 27)
        terminator = "\r"
    elif 'progressDetail' in event:
        return

    if 'time' in event:
        stream.write("[%s] " % event['time'])

    if 'id' in event:
        stream.write("%s: " % event['id'])

    if 'from' in event:
        stream.write("(from %s) " % event['from'])

    status = event.get('status', '')

    if 'progress' in event:
        stream.write("%s %s%s" % (status, event['progress'], terminator))
    elif 'progressDetail' in event:
        detail = event['progressDetail']
        if 'current' in detail:
            percentage = float(detail['current']) / float(detail['total']) * 100
            stream.write('%s (%.1f%%)%s' % (status, percentage, terminator))
        else:
            stream.write('%s%s' % (status, terminator))
    elif 'stream' in event:
        stream.write("%s%s" % (event['stream'], terminator))
    else:
        stream.write("%s%s\n" % (status, terminator))


NAME_RE = re.compile(r'^([^_]+)_([^_]+)_(run_)?(\d+)$')


def is_valid_name(name, one_off=False):
    match = NAME_RE.match(name)
    if match is None:
        return False
    if one_off:
        return match.group(3) == 'run_'
    else:
        return match.group(3) is None


def parse_name(name, one_off=False):
    match = NAME_RE.match(name)
    (project, service_name, _, suffix) = match.groups()
    return (project, service_name, int(suffix))
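
# Name handling sketch (hypothetical names):
#
#   is_valid_name("myapp_web_1")                   -> True
#   is_valid_name("myapp_web_run_1", one_off=True) -> True
#   parse_name("myapp_web_12")                     -> ("myapp", "web", 12)
#
# The regex requires project and service names without underscores, matching
# the '^[a-zA-Z0-9]+$' validation in Service.__init__.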


def get_container_name(container):
    if not container.get('Name') and not container.get('Names'):
        return None
    # inspect
    if 'Name' in container:
        return container['Name']
    # ps
    for name in container['Names']:
        if len(name.split('/')) == 2:
            return name[1:]


def split_volume(v):
    """
    If v is of the format EXTERNAL:INTERNAL, returns (EXTERNAL, INTERNAL).
    If v is of the format INTERNAL, returns (None, INTERNAL).
    """
    if ':' in v:
        return v.split(':', 1)
    else:
        return (None, v)