# compose/cli/log_printer.py
  1. from __future__ import absolute_import
  2. from __future__ import unicode_literals
  3. import sys
  4. from collections import namedtuple
  5. from itertools import cycle
  6. from threading import Thread
  7. from docker.errors import APIError
  8. from six.moves import _thread as thread
  9. from six.moves.queue import Empty
  10. from six.moves.queue import Queue
  11. from . import colors
  12. from compose import utils
  13. from compose.cli.signals import ShutdownException
  14. from compose.utils import split_buffer
  15. class LogPresenter(object):
  16. def __init__(self, prefix_width, color_func):
  17. self.prefix_width = prefix_width
  18. self.color_func = color_func
  19. def present(self, container, line):
  20. prefix = container.name_without_project.ljust(self.prefix_width)
  21. return '{prefix} {line}'.format(
  22. prefix=self.color_func(prefix + ' |'),
  23. line=line)
  24. def build_log_presenters(service_names, monochrome):
  25. """Return an iterable of functions.
  26. Each function can be used to format the logs output of a container.
  27. """
  28. prefix_width = max_name_width(service_names)
  29. def no_color(text):
  30. return text
  31. for color_func in cycle([no_color] if monochrome else colors.rainbow()):
  32. yield LogPresenter(prefix_width, color_func)
  33. def max_name_width(service_names, max_index_width=3):
  34. """Calculate the maximum width of container names so we can make the log
  35. prefixes line up like so:
  36. db_1 | Listening
  37. web_1 | Listening
  38. """
  39. return max(len(name) for name in service_names) + max_index_width
  40. class LogPrinter(object):
  41. """Print logs from many containers to a single output stream."""
  42. def __init__(self,
  43. containers,
  44. presenters,
  45. event_stream,
  46. output=sys.stdout,
  47. cascade_stop=False,
  48. log_args=None):
  49. self.containers = containers
  50. self.presenters = presenters
  51. self.event_stream = event_stream
  52. self.output = utils.get_output_stream(output)
  53. self.cascade_stop = cascade_stop
  54. self.log_args = log_args or {}
  55. def run(self):
  56. if not self.containers:
  57. return
  58. queue = Queue()
  59. thread_args = queue, self.log_args
  60. thread_map = build_thread_map(self.containers, self.presenters, thread_args)
  61. start_producer_thread((
  62. thread_map,
  63. self.event_stream,
  64. self.presenters,
  65. thread_args))
  66. for line in consume_queue(queue, self.cascade_stop):
  67. remove_stopped_threads(thread_map)
  68. if not line:
  69. if not thread_map:
  70. # There are no running containers left to tail, so exit
  71. return
  72. # We got an empty line because of a timeout, but there are still
  73. # active containers to tail, so continue
  74. continue
  75. self.output.write(line)
  76. self.output.flush()
  77. def remove_stopped_threads(thread_map):
  78. for container_id, tailer_thread in list(thread_map.items()):
  79. if not tailer_thread.is_alive():
  80. thread_map.pop(container_id, None)
  81. def build_thread(container, presenter, queue, log_args):
  82. tailer = Thread(
  83. target=tail_container_logs,
  84. args=(container, presenter, queue, log_args))
  85. tailer.daemon = True
  86. tailer.start()
  87. return tailer
  88. def build_thread_map(initial_containers, presenters, thread_args):
  89. return {
  90. container.id: build_thread(container, next(presenters), *thread_args)
  91. for container in initial_containers
  92. }
  93. class QueueItem(namedtuple('_QueueItem', 'item is_stop exc')):
  94. @classmethod
  95. def new(cls, item):
  96. return cls(item, None, None)
  97. @classmethod
  98. def exception(cls, exc):
  99. return cls(None, None, exc)
  100. @classmethod
  101. def stop(cls):
  102. return cls(None, True, None)
  103. def tail_container_logs(container, presenter, queue, log_args):
  104. generator = get_log_generator(container)
  105. try:
  106. for item in generator(container, log_args):
  107. queue.put(QueueItem.new(presenter.present(container, item)))
  108. except Exception as e:
  109. queue.put(QueueItem.exception(e))
  110. return
  111. if log_args.get('follow'):
  112. queue.put(QueueItem.new(presenter.color_func(wait_on_exit(container))))
  113. queue.put(QueueItem.stop())
  114. def get_log_generator(container):
  115. if container.has_api_logs:
  116. return build_log_generator
  117. return build_no_log_generator
  118. def build_no_log_generator(container, log_args):
  119. """Return a generator that prints a warning about logs and waits for
  120. container to exit.
  121. """
  122. yield "WARNING: no logs are available with the '{}' log driver\n".format(
  123. container.log_driver)
  124. def build_log_generator(container, log_args):
  125. # if the container doesn't have a log_stream we need to attach to container
  126. # before log printer starts running
  127. if container.log_stream is None:
  128. stream = container.logs(stdout=True, stderr=True, stream=True, **log_args)
  129. else:
  130. stream = container.log_stream
  131. return split_buffer(stream)
  132. def wait_on_exit(container):
  133. try:
  134. exit_code = container.wait()
  135. return "%s exited with code %s\n" % (container.name, exit_code)
  136. except APIError as e:
  137. return "Unexpected API error for %s (HTTP code %s)\nResponse body:\n%s\n" % (
  138. container.name, e.response.status_code,
  139. e.response.text or '[empty]'
  140. )
  141. def start_producer_thread(thread_args):
  142. producer = Thread(target=watch_events, args=thread_args)
  143. producer.daemon = True
  144. producer.start()
  145. def watch_events(thread_map, event_stream, presenters, thread_args):
  146. for event in event_stream:
  147. if event['action'] == 'stop':
  148. thread_map.pop(event['id'], None)
  149. if event['action'] != 'start':
  150. continue
  151. if event['id'] in thread_map:
  152. if thread_map[event['id']].is_alive():
  153. continue
  154. # Container was stopped and started, we need a new thread
  155. thread_map.pop(event['id'], None)
  156. thread_map[event['id']] = build_thread(
  157. event['container'],
  158. next(presenters),
  159. *thread_args)
  160. def consume_queue(queue, cascade_stop):
  161. """Consume the queue by reading lines off of it and yielding them."""
  162. while True:
  163. try:
  164. item = queue.get(timeout=0.1)
  165. except Empty:
  166. yield None
  167. continue
  168. # See https://github.com/docker/compose/issues/189
  169. except thread.error:
  170. raise ShutdownException()
  171. if item.exc:
  172. raise item.exc
  173. if item.is_stop:
  174. if cascade_stop:
  175. raise StopIteration
  176. else:
  177. continue
  178. yield item.item