# utils.py — miscellaneous docker-compose helper utilities.
  1. import codecs
  2. import hashlib
  3. import json.decoder
  4. import logging
  5. import ntpath
  6. import random
  7. import six
  8. from docker.errors import DockerException
  9. from docker.utils import parse_bytes as sdk_parse_bytes
  10. from .errors import StreamParseError
  11. from .timeparse import MULTIPLIERS
  12. from .timeparse import timeparse
# Shared JSON decoder; json_splitter/json_stream use its raw_decode/decode
# methods for incremental parsing of buffered text.
json_decoder = json.JSONDecoder()
# Module-level logger named after this module.
log = logging.getLogger(__name__)
  15. def get_output_stream(stream):
  16. if six.PY3:
  17. return stream
  18. return codecs.getwriter('utf-8')(stream)
  19. def stream_as_text(stream):
  20. """Given a stream of bytes or text, if any of the items in the stream
  21. are bytes convert them to text.
  22. This function can be removed once docker-py returns text streams instead
  23. of byte streams.
  24. """
  25. for data in stream:
  26. if not isinstance(data, six.text_type):
  27. data = data.decode('utf-8', 'replace')
  28. yield data
  29. def line_splitter(buffer, separator=u'\n'):
  30. index = buffer.find(six.text_type(separator))
  31. if index == -1:
  32. return None
  33. return buffer[:index + 1], buffer[index + 1:]
  34. def split_buffer(stream, splitter=None, decoder=lambda a: a):
  35. """Given a generator which yields strings and a splitter function,
  36. joins all input, splits on the separator and yields each chunk.
  37. Unlike string.split(), each chunk includes the trailing
  38. separator, except for the last one if none was found on the end
  39. of the input.
  40. """
  41. splitter = splitter or line_splitter
  42. buffered = six.text_type('')
  43. for data in stream_as_text(stream):
  44. buffered += data
  45. while True:
  46. buffer_split = splitter(buffered)
  47. if buffer_split is None:
  48. break
  49. item, buffered = buffer_split
  50. yield item
  51. if buffered:
  52. try:
  53. yield decoder(buffered)
  54. except Exception as e:
  55. log.error(
  56. 'Compose tried decoding the following data chunk, but failed:'
  57. '\n%s' % repr(buffered)
  58. )
  59. raise StreamParseError(e)
  60. def json_splitter(buffer):
  61. """Attempt to parse a json object from a buffer. If there is at least one
  62. object, return it and the rest of the buffer, otherwise return None.
  63. """
  64. buffer = buffer.strip()
  65. try:
  66. obj, index = json_decoder.raw_decode(buffer)
  67. rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
  68. return obj, rest
  69. except ValueError:
  70. return None
  71. def json_stream(stream):
  72. """Given a stream of text, return a stream of json objects.
  73. This handles streams which are inconsistently buffered (some entries may
  74. be newline delimited, and others are not).
  75. """
  76. return split_buffer(stream, json_splitter, json_decoder.decode)
  77. def json_hash(obj):
  78. dump = json.dumps(obj, sort_keys=True, separators=(',', ':'), default=lambda x: x.repr())
  79. h = hashlib.sha256()
  80. h.update(dump.encode('utf8'))
  81. return h.hexdigest()
  82. def microseconds_from_time_nano(time_nano):
  83. return int(time_nano % 1000000000 / 1000)
  84. def nanoseconds_from_time_seconds(time_seconds):
  85. return int(time_seconds / MULTIPLIERS['nano'])
  86. def parse_seconds_float(value):
  87. return timeparse(value or '')
  88. def parse_nanoseconds_int(value):
  89. parsed = timeparse(value or '')
  90. if parsed is None:
  91. return None
  92. return nanoseconds_from_time_seconds(parsed)
  93. def build_string_dict(source_dict):
  94. return dict((k, str(v if v is not None else '')) for k, v in source_dict.items())
  95. def splitdrive(path):
  96. if len(path) == 0:
  97. return ('', '')
  98. if path[0] in ['.', '\\', '/', '~']:
  99. return ('', path)
  100. return ntpath.splitdrive(path)
  101. def parse_bytes(n):
  102. try:
  103. return sdk_parse_bytes(n)
  104. except DockerException:
  105. return None
  106. def unquote_path(s):
  107. if not s:
  108. return s
  109. if s[0] == '"' and s[-1] == '"':
  110. return s[1:-1]
  111. return s
  112. def generate_random_id():
  113. while True:
  114. val = hex(random.getrandbits(32 * 8))[2:-1]
  115. try:
  116. int(truncate_id(val))
  117. continue
  118. except ValueError:
  119. return val
  120. def truncate_id(value):
  121. if ':' in value:
  122. value = value[value.index(':') + 1:]
  123. if len(value) > 12:
  124. return value[:12]
  125. return value
  126. def unique_everseen(iterable, key=lambda x: x):
  127. "List unique elements, preserving order. Remember all elements ever seen."
  128. seen = set()
  129. for element in iterable:
  130. unique_key = key(element)
  131. if unique_key not in seen:
  132. seen.add(unique_key)
  133. yield element
  134. def truncate_string(s, max_chars=35):
  135. if len(s) > max_chars:
  136. return s[:max_chars - 2] + '...'
  137. return s