utils.py

import hashlib
import json
import json.decoder
import logging
import ntpath
import random

from docker.errors import DockerException
from docker.utils import parse_bytes as sdk_parse_bytes

from .errors import StreamParseError
from .timeparse import MULTIPLIERS
from .timeparse import timeparse

json_decoder = json.JSONDecoder()
log = logging.getLogger(__name__)


def stream_as_text(stream):
    """Given a stream of bytes or text, if any of the items in the stream
    are bytes convert them to text.

    This function can be removed once docker-py returns text streams instead
    of byte streams.
    """
    for data in stream:
        if not isinstance(data, str):
            data = data.decode('utf-8', 'replace')
        yield data
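
# Illustrative usage (editor's addition, not part of the original module);
# the sample values are hypothetical:
#
#     >>> list(stream_as_text([b'hello ', 'world']))
#     ['hello ', 'world']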


def line_splitter(buffer, separator='\n'):
    index = buffer.find(str(separator))
    if index == -1:
        return None
    return buffer[:index + 1], buffer[index + 1:]
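
# Illustrative usage (editor's addition); note the chunk keeps its trailing
# separator, and None signals that no separator was found:
#
#     >>> line_splitter('one\ntwo')
#     ('one\n', 'two')
#     >>> line_splitter('no newline yet') is None
#     True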


def split_buffer(stream, splitter=None, decoder=lambda a: a):
    """Given a generator which yields strings and a splitter function,
    joins all input, splits on the separator and yields each chunk.

    Unlike string.split(), each chunk includes the trailing
    separator, except for the last one if none was found on the end
    of the input.
    """
    splitter = splitter or line_splitter
    buffered = ''

    for data in stream_as_text(stream):
        buffered += data
        while True:
            buffer_split = splitter(buffered)
            if buffer_split is None:
                break

            item, buffered = buffer_split
            yield item

    if buffered:
        try:
            yield decoder(buffered)
        except Exception as e:
            log.error(
                'Compose tried decoding the following data chunk, but failed:'
                '\n%s' % repr(buffered)
            )
            raise StreamParseError(e)
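
# Illustrative usage (editor's addition); chunks arriving in arbitrary pieces
# are re-split on newlines, and the unterminated tail is flushed at the end:
#
#     >>> list(split_buffer(iter(['a\nb', '\nc'])))
#     ['a\n', 'b\n', 'c']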


def json_splitter(buffer):
    """Attempt to parse a json object from a buffer. If there is at least one
    object, return it and the rest of the buffer, otherwise return None.
    """
    buffer = buffer.strip()
    try:
        obj, index = json_decoder.raw_decode(buffer)
        rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
        return obj, rest
    except ValueError:
        return None
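
# Illustrative usage (editor's addition); a complete leading object is parsed
# and the remainder returned, while an incomplete buffer yields None:
#
#     >>> json_splitter('{"a": 1} {"b": 2}')
#     ({'a': 1}, '{"b": 2}')
#     >>> json_splitter('{"incomplete"') is None
#     True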


def json_stream(stream):
    """Given a stream of text, return a stream of json objects.
    This handles streams which are inconsistently buffered (some entries may
    be newline delimited, and others are not).
    """
    return split_buffer(stream, json_splitter, json_decoder.decode)
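
# Illustrative usage (editor's addition); objects split across arbitrary
# chunk boundaries are still decoded:
#
#     >>> list(json_stream(iter(['{"a": 1}\n{"b"', ': 2}'])))
#     [{'a': 1}, {'b': 2}]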


def json_hash(obj):
    # Objects that json can't serialize natively are expected to provide a
    # repr() method producing a stable representation.
    dump = json.dumps(obj, sort_keys=True, separators=(',', ':'),
                      default=lambda x: x.repr())
    h = hashlib.sha256()
    h.update(dump.encode('utf8'))
    return h.hexdigest()
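
# Illustrative usage (editor's addition); sort_keys makes the digest
# independent of key order:
#
#     >>> json_hash({'a': 1, 'b': 2}) == json_hash({'b': 2, 'a': 1})
#     True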


def microseconds_from_time_nano(time_nano):
    return int(time_nano % 1000000000 / 1000)


def nanoseconds_from_time_seconds(time_seconds):
    return int(time_seconds / MULTIPLIERS['nano'])
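
# Illustrative usage (editor's addition), assuming MULTIPLIERS['nano'] in
# compose.timeparse is 1e-9:
#
#     >>> microseconds_from_time_nano(2000123456)  # sub-second part only
#     123
#     >>> nanoseconds_from_time_seconds(1.5)
#     1500000000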


def parse_seconds_float(value):
    return timeparse(value or '')


def parse_nanoseconds_int(value):
    parsed = timeparse(value or '')
    if parsed is None:
        return None
    return nanoseconds_from_time_seconds(parsed)
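
# Illustrative usage (editor's addition), assuming compose.timeparse accepts
# duration strings such as '1m30s' (the exact numeric type is up to
# timeparse):
#
#     >>> parse_seconds_float('1m30s')   # -> 90 seconds
#     >>> parse_nanoseconds_int('1.5s')  # -> 1500000000
#     >>> parse_nanoseconds_int(None) is None
#     True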


def build_string_dict(source_dict):
    return {k: str(v if v is not None else '') for k, v in source_dict.items()}
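
# Illustrative usage (editor's addition); values are stringified and None
# becomes the empty string:
#
#     >>> build_string_dict({'a': 1, 'b': None})
#     {'a': '1', 'b': ''}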


def splitdrive(path):
    if len(path) == 0:
        return ('', '')
    # Relative, POSIX-style and home-relative paths have no drive component.
    if path[0] in ['.', '\\', '/', '~']:
        return ('', path)
    return ntpath.splitdrive(path)
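
# Illustrative usage (editor's addition):
#
#     >>> splitdrive('C:\\Users\\compose')
#     ('C:', '\\Users\\compose')
#     >>> splitdrive('./relative')
#     ('', './relative')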


def parse_bytes(n):
    try:
        return sdk_parse_bytes(n)
    except DockerException:
        return None
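
# Illustrative usage (editor's addition), assuming docker-py's 1024-based
# units; invalid input is mapped to None instead of raising:
#
#     >>> parse_bytes('128m')
#     134217728
#     >>> parse_bytes('not a size') is None
#     True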


def unquote_path(s):
    if not s:
        return s
    if s[0] == '"' and s[-1] == '"':
        return s[1:-1]
    return s
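
# Illustrative usage (editor's addition):
#
#     >>> unquote_path('"C:\\some path"')
#     'C:\\some path'
#     >>> unquote_path('plain')
#     'plain'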


def generate_random_id():
    while True:
        # hex() has no trailing 'L' on Python 3, so take everything after '0x'.
        val = hex(random.getrandbits(32 * 8))[2:]
        try:
            # Reject candidates whose truncated form is purely numeric.
            int(truncate_id(val))
            continue
        except ValueError:
            return val


def truncate_id(value):
    if ':' in value:
        value = value[value.index(':') + 1:]
    if len(value) > 12:
        return value[:12]
    return value
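
# Illustrative usage (editor's addition); an 'algorithm:digest' prefix is
# stripped before truncating to 12 characters:
#
#     >>> truncate_id('sha256:0123456789abcdef')
#     '0123456789ab'
#
# generate_random_id() keeps drawing 256-bit values until the truncated hex
# form is not purely numeric.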


def unique_everseen(iterable, key=lambda x: x):
    "List unique elements, preserving order. Remember all elements ever seen."
    seen = set()
    for element in iterable:
        unique_key = key(element)
        if unique_key not in seen:
            seen.add(unique_key)
            yield element
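
# Illustrative usage (editor's addition):
#
#     >>> list(unique_everseen('AAAABBBCCDA'))
#     ['A', 'B', 'C', 'D']
#     >>> list(unique_everseen(['a', 'A', 'b'], key=str.lower))
#     ['a', 'b']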


def truncate_string(s, max_chars=35):
    if len(s) > max_chars:
        # Keep the result within max_chars, including the three-dot ellipsis.
        return s[:max_chars - 3] + '...'
    return s
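
# Illustrative usage (editor's addition):
#
#     >>> truncate_string('abcdefghij', max_chars=5)
#     'ab...'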


def filter_attached_for_up(items, service_names, attach_dependencies=False,
                           item_to_service_name=lambda x: x):
    """This function contains the logic of choosing which services to
    attach when doing docker-compose up. It may be used both with containers
    and services, and any other entities that map to service names -
    this mapping is provided by item_to_service_name."""
    if attach_dependencies or not service_names:
        return items

    return [
        item
        for item in items if item_to_service_name(item) in service_names
    ]
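
# Illustrative usage (editor's addition); with the identity mapping, items
# are filtered down to the requested service names unless dependencies are
# attached or no names were given:
#
#     >>> filter_attached_for_up(['web', 'db', 'cache'], ['web'])
#     ['web']
#     >>> filter_attached_for_up(['web', 'db'], [])
#     ['web', 'db']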