config.py

from __future__ import absolute_import
from __future__ import unicode_literals

import codecs
import logging
import operator
import os
import sys
from collections import namedtuple

import six
import yaml

from ..const import COMPOSEFILE_VERSIONS
from .errors import CircularReference
from .errors import ComposeFileNotFound
from .errors import ConfigurationError
from .interpolation import interpolate_environment_variables
from .sort_services import get_service_name_from_net
from .sort_services import sort_service_dicts
from .types import parse_extra_hosts
from .types import parse_restart_spec
from .types import VolumeFromSpec
from .types import VolumeSpec
from .validation import validate_against_fields_schema
from .validation import validate_against_service_schema
from .validation import validate_extends_file_path
from .validation import validate_top_level_object
from .validation import validate_top_level_service_objects


DOCKER_CONFIG_KEYS = [
    'cap_add',
    'cap_drop',
    'cgroup_parent',
    'command',
    'cpu_quota',
    'cpu_shares',
    'cpuset',
    'detach',
    'devices',
    'dns',
    'dns_search',
    'domainname',
    'entrypoint',
    'env_file',
    'environment',
    'extra_hosts',
    'hostname',
    'image',
    'ipc',
    'labels',
    'links',
    'mac_address',
    'mem_limit',
    'memswap_limit',
    'net',
    'pid',
    'ports',
    'privileged',
    'read_only',
    'restart',
    'security_opt',
    'stdin_open',
    'tty',
    'user',
    'volume_driver',
    'volumes',
    'volumes_from',
    'working_dir',
]

ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
    'build',
    'container_name',
    'dockerfile',
    'expose',
    'external_links',
    'logging',
]

DOCKER_VALID_URL_PREFIXES = (
    'http://',
    'https://',
    'git://',
    'github.com/',
    'git@',
)

SUPPORTED_FILENAMES = [
    'docker-compose.yml',
    'docker-compose.yaml',
]

DEFAULT_OVERRIDE_FILENAME = 'docker-compose.override.yml'


log = logging.getLogger(__name__)


class ConfigDetails(namedtuple('_ConfigDetails', 'working_dir config_files')):
    """
    :param working_dir: the directory to use for relative paths in the config
    :type working_dir: string
    :param config_files: list of configuration files to load
    :type config_files: list of :class:`ConfigFile`
    """


class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
    """
    :param filename: filename of the config file
    :type filename: string
    :param config: contents of the config file
    :type config: :class:`dict`
    """

    @classmethod
    def from_filename(cls, filename):
        return cls(filename, load_yaml(filename))

    def get_service_dicts(self, version):
        return self.config if version == 1 else self.config.get('services', {})


class Config(namedtuple('_Config', 'version services volumes')):
    """
    :param version: configuration version
    :type version: int
    :param services: List of service description dictionaries
    :type services: :class:`list`
    :param volumes: Dictionary mapping volume names to description dictionaries
    :type volumes: :class:`dict`
    """


class ServiceConfig(namedtuple('_ServiceConfig', 'working_dir filename name config')):

    @classmethod
    def with_abs_paths(cls, working_dir, filename, name, config):
        if not working_dir:
            raise ValueError("No working_dir for ServiceConfig.")

        return cls(
            os.path.abspath(working_dir),
            os.path.abspath(filename) if filename else filename,
            name,
            config)


def find(base_dir, filenames):
    if filenames == ['-']:
        return ConfigDetails(
            os.getcwd(),
            [ConfigFile(None, yaml.safe_load(sys.stdin))])

    if filenames:
        filenames = [os.path.join(base_dir, f) for f in filenames]
    else:
        filenames = get_default_config_files(base_dir)

    log.debug("Using configuration files: {}".format(",".join(filenames)))
    return ConfigDetails(
        os.path.dirname(filenames[0]),
        [ConfigFile.from_filename(f) for f in filenames])


def get_config_version(config_details):
    def get_version(config):
        if config.config is None:
            return 1

        version = config.config.get('version', 1)
        if isinstance(version, dict):
            # in that case 'version' is probably a service name, so assume
            # this is a legacy (version=1) file
            version = 1
        return version

    main_file = config_details.config_files[0]
    validate_top_level_object(main_file)
    version = get_version(main_file)
    for next_file in config_details.config_files[1:]:
        validate_top_level_object(next_file)
        next_file_version = get_version(next_file)

        if version != next_file_version and next_file_version is not None:
            raise ConfigurationError(
                "Version mismatch: main file {0} specifies version {1} but "
                "extension file {2} uses version {3}".format(
                    main_file.filename, version, next_file.filename, next_file_version
                )
            )

    return version


def get_default_config_files(base_dir):
    (candidates, path) = find_candidates_in_parent_dirs(SUPPORTED_FILENAMES, base_dir)

    if not candidates:
        raise ComposeFileNotFound(SUPPORTED_FILENAMES)

    winner = candidates[0]

    if len(candidates) > 1:
        log.warn("Found multiple config files with supported names: %s", ", ".join(candidates))
        log.warn("Using %s\n", winner)

    return [os.path.join(path, winner)] + get_default_override_file(path)


def get_default_override_file(path):
    override_filename = os.path.join(path, DEFAULT_OVERRIDE_FILENAME)
    return [override_filename] if os.path.exists(override_filename) else []


def find_candidates_in_parent_dirs(filenames, path):
    """
    Given a directory path to start, looks for filenames in the
    directory, and then each parent directory successively,
    until found.

    Returns tuple (candidates, path).
    """
    candidates = [filename for filename in filenames
                  if os.path.exists(os.path.join(path, filename))]

    if not candidates:
        parent_dir = os.path.join(path, '..')
        if os.path.abspath(parent_dir) != os.path.abspath(path):
            return find_candidates_in_parent_dirs(filenames, parent_dir)

    return (candidates, path)


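# Illustrative sketch (hypothetical paths, not part of the original module):
# starting from a subdirectory of a project whose compose file lives at the
# project root, find_candidates_in_parent_dirs() walks up one '..' at a time:
#
#   find_candidates_in_parent_dirs(SUPPORTED_FILENAMES, '/proj/src')
#   # => (['docker-compose.yml'], '/proj/src/..')  if /proj/docker-compose.yml exists
#
# Note the returned directory is left un-normalized; get_default_config_files()
# simply joins it with the winning filename.

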
def load(config_details):
    """Load the configuration from a working directory and a list of
    configuration files. Files are loaded in order, and merged on top
    of each other to create the final configuration.

    Return a fully interpolated, extended and validated configuration.
    """
    version = get_config_version(config_details)
    if version not in COMPOSEFILE_VERSIONS:
        raise ConfigurationError('Invalid config version provided: {0}'.format(version))

    processed_files = []
    for config_file in config_details.config_files:
        processed_files.append(
            process_config_file(config_file, version=version)
        )
    config_details = config_details._replace(config_files=processed_files)

    if version == 1:
        service_dicts = load_services(
            config_details.working_dir, config_details.config_files,
            version
        )
        volumes = {}
    elif version == 2:
        config_files = [
            ConfigFile(f.filename, f.config.get('services', {}))
            for f in config_details.config_files
        ]
        service_dicts = load_services(
            config_details.working_dir, config_files, version
        )
        volumes = load_volumes(config_details.config_files)

    return Config(version, service_dicts, volumes)


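# Illustrative usage sketch (hypothetical file layout): find() and load() are
# the two entry points typically used together by callers of this module:
#
#   config_details = find('/proj', ['docker-compose.yml'])
#   config = load(config_details)
#   config.version   # 1 or 2
#   config.services  # list of fully resolved service dicts, each with a 'name' key
#   config.volumes   # {} for version 1, named volume configs for version 2

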
def load_volumes(config_files):
    volumes = {}
    for config_file in config_files:
        for name, volume_config in config_file.config.get('volumes', {}).items():
            volumes.update({name: volume_config})

    return volumes


def load_services(working_dir, config_files, version):
    def build_service(filename, service_name, service_dict):
        service_config = ServiceConfig.with_abs_paths(
            working_dir,
            filename,
            service_name,
            service_dict)
        resolver = ServiceExtendsResolver(service_config, version)
        service_dict = process_service(resolver.run())

        # TODO: move to validate_service()
        validate_against_service_schema(service_dict, service_config.name, version)
        validate_paths(service_dict)

        service_dict = finalize_service(service_config._replace(config=service_dict))
        service_dict['name'] = service_config.name
        return service_dict

    def build_services(config_file):
        return sort_service_dicts([
            build_service(config_file.filename, name, service_dict)
            for name, service_dict in config_file.config.items()
        ])

    def merge_services(base, override):
        all_service_names = set(base) | set(override)
        return {
            name: merge_service_dicts_from_files(
                base.get(name, {}),
                override.get(name, {}),
                version)
            for name in all_service_names
        }

    config_file = config_files[0]
    for next_file in config_files[1:]:
        config = merge_services(config_file.config, next_file.config)
        config_file = config_file._replace(config=config)

    return build_services(config_file)


def process_config_file(config_file, version, service_name=None):
    service_dicts = config_file.get_service_dicts(version)
    validate_top_level_service_objects(
        config_file.filename, service_dicts
    )

    interpolated_config = interpolate_environment_variables(service_dicts)

    if version == 2:
        processed_config = dict(config_file.config)
        processed_config.update({'services': interpolated_config})
    if version == 1:
        processed_config = interpolated_config

    validate_against_fields_schema(
        processed_config, config_file.filename, version
    )

    if service_name and service_name not in processed_config:
        raise ConfigurationError(
            "Cannot extend service '{}' in {}: Service not found".format(
                service_name, config_file.filename))

    return config_file._replace(config=processed_config)


class ServiceExtendsResolver(object):
    def __init__(self, service_config, version, already_seen=None):
        self.service_config = service_config
        self.working_dir = service_config.working_dir
        self.already_seen = already_seen or []
        self.version = version

    @property
    def signature(self):
        return self.service_config.filename, self.service_config.name

    def detect_cycle(self):
        if self.signature in self.already_seen:
            raise CircularReference(self.already_seen + [self.signature])

    def run(self):
        self.detect_cycle()

        if 'extends' in self.service_config.config:
            service_dict = self.resolve_extends(*self.validate_and_construct_extends())
            return self.service_config._replace(config=service_dict)

        return self.service_config

    def validate_and_construct_extends(self):
        extends = self.service_config.config['extends']
        if not isinstance(extends, dict):
            extends = {'service': extends}

        config_path = self.get_extended_config_path(extends)
        service_name = extends['service']

        extended_file = process_config_file(
            ConfigFile.from_filename(config_path),
            version=self.version, service_name=service_name
        )
        service_config = extended_file.config[service_name]
        return config_path, service_config, service_name

    def resolve_extends(self, extended_config_path, service_dict, service_name):
        resolver = ServiceExtendsResolver(
            ServiceConfig.with_abs_paths(
                os.path.dirname(extended_config_path),
                extended_config_path,
                service_name,
                service_dict),
            self.version,
            already_seen=self.already_seen + [self.signature])

        service_config = resolver.run()
        other_service_dict = process_service(service_config)
        validate_extended_service_dict(
            other_service_dict,
            extended_config_path,
            service_name,
        )

        return merge_service_dicts(
            other_service_dict,
            self.service_config.config,
            self.version)

    def get_extended_config_path(self, extends_options):
        """The service we are extending either has a value for 'file' set,
        which we need to expand to a full path, or is defined in our own file.
        """
        filename = self.service_config.filename
        validate_extends_file_path(
            self.service_config.name,
            extends_options,
            filename)
        if 'file' in extends_options:
            return expand_path(self.working_dir, extends_options['file'])
        return filename


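# Illustrative sketch (hypothetical filenames): for a service declared as
#
#   web:
#     extends:
#       file: common.yml
#       service: webapp
#
# get_extended_config_path({'file': 'common.yml', 'service': 'webapp'}) expands
# 'common.yml' relative to the working directory; when no 'file' key is given,
# it falls back to the current config file's own filename.

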
def resolve_environment(service_dict):
    """Unpack any environment variables from an env_file, if set.
    Interpolate environment values if set.
    """
    env = {}
    for env_file in service_dict.get('env_file', []):
        env.update(env_vars_from_file(env_file))

    env.update(parse_environment(service_dict.get('environment')))
    return dict(resolve_env_var(k, v) for k, v in six.iteritems(env))


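# Illustrative sketch (hypothetical values): keys from 'environment' override
# keys from any env_file, and a key declared without a value is filled in from
# os.environ (or '' if unset):
#
#   service_dict = {'env_file': ['/proj/.env'],  # say the file sets FOO=from_file
#                   'environment': {'FOO': 'from_config', 'TERM': None}}
#   resolve_environment(service_dict)
#   # => {'FOO': 'from_config', 'TERM': <value of $TERM, or ''>}

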
def validate_extended_service_dict(service_dict, filename, service):
    error_prefix = "Cannot extend service '%s' in %s:" % (service, filename)

    if 'links' in service_dict:
        raise ConfigurationError(
            "%s services with 'links' cannot be extended" % error_prefix)

    if 'volumes_from' in service_dict:
        raise ConfigurationError(
            "%s services with 'volumes_from' cannot be extended" % error_prefix)

    if 'net' in service_dict:
        if get_service_name_from_net(service_dict['net']) is not None:
            raise ConfigurationError(
                "%s services with 'net: container' cannot be extended" % error_prefix)


def validate_ulimits(ulimit_config):
    for limit_name, soft_hard_values in six.iteritems(ulimit_config):
        if isinstance(soft_hard_values, dict):
            if not soft_hard_values['soft'] <= soft_hard_values['hard']:
                raise ConfigurationError(
                    "ulimit_config \"{}\" cannot contain a 'soft' value higher "
                    "than 'hard' value".format(ulimit_config))


# TODO: rename to normalize_service
def process_service(service_config):
    working_dir = service_config.working_dir
    service_dict = dict(service_config.config)

    if 'env_file' in service_dict:
        service_dict['env_file'] = [
            expand_path(working_dir, path)
            for path in to_list(service_dict['env_file'])
        ]

    if 'volumes' in service_dict and service_dict.get('volume_driver') is None:
        service_dict['volumes'] = resolve_volume_paths(working_dir, service_dict)

    if 'build' in service_dict:
        service_dict['build'] = resolve_build_path(working_dir, service_dict['build'])

    if 'labels' in service_dict:
        service_dict['labels'] = parse_labels(service_dict['labels'])

    if 'extra_hosts' in service_dict:
        service_dict['extra_hosts'] = parse_extra_hosts(service_dict['extra_hosts'])

    for field in ['dns', 'dns_search']:
        if field in service_dict:
            service_dict[field] = to_list(service_dict[field])

    # TODO: move to a validate_service()
    if 'ulimits' in service_dict:
        validate_ulimits(service_dict['ulimits'])

    return service_dict


def finalize_service(service_config):
    service_dict = dict(service_config.config)

    if 'environment' in service_dict or 'env_file' in service_dict:
        service_dict['environment'] = resolve_environment(service_dict)
        service_dict.pop('env_file', None)

    if 'volumes_from' in service_dict:
        service_dict['volumes_from'] = [
            VolumeFromSpec.parse(vf) for vf in service_dict['volumes_from']]

    if 'volumes' in service_dict:
        service_dict['volumes'] = [
            VolumeSpec.parse(v) for v in service_dict['volumes']]

    if 'restart' in service_dict:
        service_dict['restart'] = parse_restart_spec(service_dict['restart'])

    return normalize_v1_service_format(service_dict)


def normalize_v1_service_format(service_dict):
    if 'log_driver' in service_dict or 'log_opt' in service_dict:
        if 'logging' not in service_dict:
            service_dict['logging'] = {}
        if 'log_driver' in service_dict:
            service_dict['logging']['driver'] = service_dict['log_driver']
            del service_dict['log_driver']
        if 'log_opt' in service_dict:
            service_dict['logging']['options'] = service_dict['log_opt']
            del service_dict['log_opt']

    return service_dict


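# Illustrative sketch: the legacy v1 logging keys are folded into the v2-style
# 'logging' mapping:
#
#   normalize_v1_service_format({'log_driver': 'syslog', 'log_opt': {'tag': 'web'}})
#   # => {'logging': {'driver': 'syslog', 'options': {'tag': 'web'}}}

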
def merge_service_dicts_from_files(base, override, version):
    """When merging services from multiple files we need to merge the `extends`
    field. This is not handled by `merge_service_dicts()`, which is also used to
    perform the `extends` merge itself.
    """
    new_service = merge_service_dicts(base, override, version)
    if 'extends' in override:
        new_service['extends'] = override['extends']
    elif 'extends' in base:
        new_service['extends'] = base['extends']
    return new_service


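# Illustrative sketch (hypothetical service dicts): the override file wins for
# 'extends'; otherwise the base file's value is carried over:
#
#   merge_service_dicts_from_files(
#       {'image': 'a', 'extends': {'service': 'base'}},
#       {'ports': ['80']},
#       version=2)
#   # => {'image': 'a', 'ports': ['80'], 'extends': {'service': 'base'}}

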
def merge_service_dicts(base, override, version):
    d = {}

    def merge_field(field, merge_func, default=None):
        if field in base or field in override:
            d[field] = merge_func(
                base.get(field, default),
                override.get(field, default))

    merge_field('environment', merge_environment)
    merge_field('labels', merge_labels)

    for field in ['volumes', 'devices']:
        merge_field(field, merge_path_mappings)

    for field in ['ports', 'expose', 'external_links']:
        merge_field(field, operator.add, default=[])

    for field in ['dns', 'dns_search', 'env_file']:
        merge_field(field, merge_list_or_string)

    for field in set(ALLOWED_KEYS) - set(d):
        if field in base or field in override:
            d[field] = override.get(field, base.get(field))

    if version == 1:
        legacy_v1_merge_image_or_build(d, base, override)

    return d


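# Illustrative sketch (hypothetical service dicts): mapping fields are merged
# key-by-key, list fields are concatenated, and remaining scalar fields are
# simply overridden:
#
#   merge_service_dicts(
#       {'image': 'a', 'ports': ['80'], 'environment': {'A': '1'}},
#       {'ports': ['443'], 'environment': {'A': '2', 'B': '3'}},
#       version=2)
#   # => {'image': 'a', 'ports': ['80', '443'],
#   #     'environment': {'A': '2', 'B': '3'}}

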
def legacy_v1_merge_image_or_build(output, base, override):
    output.pop('image', None)
    output.pop('build', None)
    if 'image' in override:
        output['image'] = override['image']
    elif 'build' in override:
        output['build'] = override['build']
    elif 'image' in base:
        output['image'] = base['image']
    elif 'build' in base:
        output['build'] = base['build']


def merge_environment(base, override):
    env = parse_environment(base)
    env.update(parse_environment(override))
    return env


def parse_environment(environment):
    if not environment:
        return {}

    if isinstance(environment, list):
        return dict(split_env(e) for e in environment)

    if isinstance(environment, dict):
        return dict(environment)

    raise ConfigurationError(
        "environment \"%s\" must be a list or mapping." %
        environment
    )


def split_env(env):
    if isinstance(env, six.binary_type):
        env = env.decode('utf-8', 'replace')
    if '=' in env:
        return env.split('=', 1)
    else:
        return env, None


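# Illustrative sketch:
#
#   split_env('TERM=xterm-256color')  # => ['TERM', 'xterm-256color']
#   split_env('HOSTNAME')             # => ('HOSTNAME', None) -- resolved later
#                                     #    against os.environ by resolve_env_var()

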
def resolve_env_var(key, val):
    if val is not None:
        return key, val
    elif key in os.environ:
        return key, os.environ[key]
    else:
        return key, ''


def env_vars_from_file(filename):
    """
    Read in a line-delimited file of environment variables.
    """
    if not os.path.exists(filename):
        raise ConfigurationError("Couldn't find env file: %s" % filename)
    env = {}
    for line in codecs.open(filename, 'r', 'utf-8'):
        line = line.strip()
        if line and not line.startswith('#'):
            k, v = split_env(line)
            env[k] = v
    return env


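# Illustrative sketch (hypothetical file): given /proj/.env containing
#
#   # comments and blank lines are skipped
#   DB_HOST=db
#   DEBUG
#
# env_vars_from_file('/proj/.env') returns {'DB_HOST': 'db', 'DEBUG': None}.

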
def resolve_volume_paths(working_dir, service_dict):
    return [
        resolve_volume_path(working_dir, volume)
        for volume in service_dict['volumes']
    ]


def resolve_volume_path(working_dir, volume):
    container_path, host_path = split_path_mapping(volume)

    if host_path is not None:
        if host_path.startswith('.'):
            host_path = expand_path(working_dir, host_path)
        host_path = os.path.expanduser(host_path)
        return u"{}:{}".format(host_path, container_path)
    else:
        return container_path


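# Illustrative sketch (hypothetical paths): relative and '~' host paths are
# expanded, container-only volumes pass through unchanged:
#
#   resolve_volume_path('/proj', './data:/var/lib/data')  # => '/proj/data:/var/lib/data'
#   resolve_volume_path('/proj', '/var/lib/data')         # => '/var/lib/data'

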
def resolve_build_path(working_dir, build_path):
    if is_url(build_path):
        return build_path
    return expand_path(working_dir, build_path)


def is_url(build_path):
    return build_path.startswith(DOCKER_VALID_URL_PREFIXES)


def validate_paths(service_dict):
    if 'build' in service_dict:
        build_path = service_dict['build']
        if (
            not is_url(build_path) and
            (not os.path.exists(build_path) or not os.access(build_path, os.R_OK))
        ):
            raise ConfigurationError(
                "build path %s either does not exist, is not accessible, "
                "or is not a valid URL." % build_path)


def merge_path_mappings(base, override):
    d = dict_from_path_mappings(base)
    d.update(dict_from_path_mappings(override))
    return path_mappings_from_dict(d)


def dict_from_path_mappings(path_mappings):
    if path_mappings:
        return dict(split_path_mapping(v) for v in path_mappings)
    else:
        return {}


def path_mappings_from_dict(d):
    return [join_path_mapping(v) for v in d.items()]


def split_path_mapping(volume_path):
    """
    Ascertain whether the volume_path contains a host path as well as a
    container path. Uses splitdrive so Windows absolute paths won't cause
    issues when splitting on ':'.
    """
    # splitdrive has limitations when it comes to relative paths, so when the
    # path is relative, handle the special case and set the drive to ''
    if volume_path.startswith('.') or volume_path.startswith('~'):
        drive, volume_config = '', volume_path
    else:
        drive, volume_config = os.path.splitdrive(volume_path)

    if ':' in volume_config:
        (host, container) = volume_config.split(':', 1)
        return (container, drive + host)
    else:
        return (volume_path, None)


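# Illustrative sketch: on Windows, splitdrive() keeps the drive letter with the
# host path so its ':' is not mistaken for the host/container separator:
#
#   split_path_mapping('C:\\Users\\me:/data')  # => ('/data', 'C:\\Users\\me')
#   split_path_mapping('/data')                # => ('/data', None)

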
def join_path_mapping(pair):
    (container, host) = pair
    if host is None:
        return container
    else:
        return ":".join((host, container))


def merge_labels(base, override):
    labels = parse_labels(base)
    labels.update(parse_labels(override))
    return labels


def parse_labels(labels):
    if not labels:
        return {}

    if isinstance(labels, list):
        return dict(split_label(e) for e in labels)

    if isinstance(labels, dict):
        return dict(labels)


def split_label(label):
    if '=' in label:
        return label.split('=', 1)
    else:
        return label, ''


def expand_path(working_dir, path):
    return os.path.abspath(os.path.join(working_dir, os.path.expanduser(path)))


def merge_list_or_string(base, override):
    return to_list(base) + to_list(override)


def to_list(value):
    if value is None:
        return []
    elif isinstance(value, six.string_types):
        return [value]
    else:
        return value


def load_yaml(filename):
    try:
        with open(filename, 'r') as fh:
            return yaml.safe_load(fh)
    except (IOError, yaml.YAMLError) as e:
        error_name = getattr(e, '__module__', '') + '.' + e.__class__.__name__
        raise ConfigurationError(u"{}: {}".format(error_name, e))