config.py

from __future__ import absolute_import
from __future__ import unicode_literals

import codecs
import functools
import logging
import operator
import os
import string
import sys
from collections import namedtuple

import six
import yaml
from cached_property import cached_property

from ..const import COMPOSEFILE_V1 as V1
from ..const import COMPOSEFILE_V2_0 as V2_0
from ..utils import build_string_dict
from .errors import CircularReference
from .errors import ComposeFileNotFound
from .errors import ConfigurationError
from .errors import VERSION_EXPLANATION
from .interpolation import interpolate_environment_variables
from .sort_services import get_container_name_from_network_mode
from .sort_services import get_service_name_from_network_mode
from .sort_services import sort_service_dicts
from .types import parse_extra_hosts
from .types import parse_restart_spec
from .types import ServiceLink
from .types import VolumeFromSpec
from .types import VolumeSpec
from .validation import match_named_volumes
from .validation import validate_against_config_schema
from .validation import validate_config_section
from .validation import validate_depends_on
from .validation import validate_extends_file_path
from .validation import validate_network_mode
from .validation import validate_service_constraints
from .validation import validate_top_level_object
from .validation import validate_ulimits


DOCKER_CONFIG_KEYS = [
    'cap_add',
    'cap_drop',
    'cgroup_parent',
    'command',
    'cpu_quota',
    'cpu_shares',
    'cpuset',
    'detach',
    'devices',
    'dns',
    'dns_search',
    'domainname',
    'entrypoint',
    'env_file',
    'environment',
    'extra_hosts',
    'hostname',
    'image',
    'ipc',
    'labels',
    'links',
    'mac_address',
    'mem_limit',
    'memswap_limit',
    'net',
    'pid',
    'ports',
    'privileged',
    'read_only',
    'restart',
    'security_opt',
    'shm_size',
    'stdin_open',
    'stop_signal',
    'tty',
    'user',
    'volume_driver',
    'volumes',
    'volumes_from',
    'working_dir',
]

ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
    'build',
    'container_name',
    'dockerfile',
    'log_driver',
    'log_opt',
    'logging',
    'network_mode',
]

DOCKER_VALID_URL_PREFIXES = (
    'http://',
    'https://',
    'git://',
    'github.com/',
    'git@',
)

SUPPORTED_FILENAMES = [
    'docker-compose.yml',
    'docker-compose.yaml',
]

DEFAULT_OVERRIDE_FILENAME = 'docker-compose.override.yml'


log = logging.getLogger(__name__)


class ConfigDetails(namedtuple('_ConfigDetails', 'working_dir config_files')):
    """
    :param working_dir: the directory to use for relative paths in the config
    :type working_dir: string
    :param config_files: list of configuration files to load
    :type config_files: list of :class:`ConfigFile`
    """


class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
    """
    :param filename: filename of the config file
    :type filename: string
    :param config: contents of the config file
    :type config: :class:`dict`
    """

    @classmethod
    def from_filename(cls, filename):
        return cls(filename, load_yaml(filename))

    @cached_property
    def version(self):
        if 'version' not in self.config:
            return V1

        version = self.config['version']

        if isinstance(version, dict):
            log.warn('Unexpected type for "version" key in "{}". Assuming '
                     '"version" is the name of a service, and defaulting to '
                     'Compose file version 1.'.format(self.filename))
            return V1

        if not isinstance(version, six.string_types):
            raise ConfigurationError(
                'Version in "{}" is invalid - it should be a string.'
                .format(self.filename))

        if version == '1':
            raise ConfigurationError(
                'Version in "{}" is invalid. {}'
                .format(self.filename, VERSION_EXPLANATION))

        if version == '2':
            version = V2_0

        if version != V2_0:
            raise ConfigurationError(
                'Version in "{}" is unsupported. {}'
                .format(self.filename, VERSION_EXPLANATION))

        return version

    def get_service(self, name):
        return self.get_service_dicts()[name]

    def get_service_dicts(self):
        return self.config if self.version == V1 else self.config.get('services', {})

    def get_volumes(self):
        return {} if self.version == V1 else self.config.get('volumes', {})

    def get_networks(self):
        return {} if self.version == V1 else self.config.get('networks', {})


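# Illustrative usage (not part of the original module): a ConfigFile wraps the
# parsed YAML of a single compose file and exposes its schema version. The
# filename below is hypothetical.
#
#     config_file = ConfigFile.from_filename('docker-compose.yml')
#     config_file.version              # V1 when there is no `version` key, V2_0 for `version: "2"`
#     config_file.get_service_dicts()  # e.g. {'web': {...}, 'db': {...}}

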
class Config(namedtuple('_Config', 'version services volumes networks')):
    """
    :param version: configuration version
    :type version: string
    :param services: List of service description dictionaries
    :type services: :class:`list`
    :param volumes: Dictionary mapping volume names to description dictionaries
    :type volumes: :class:`dict`
    :param networks: Dictionary mapping network names to description dictionaries
    :type networks: :class:`dict`
    """


class ServiceConfig(namedtuple('_ServiceConfig', 'working_dir filename name config')):

    @classmethod
    def with_abs_paths(cls, working_dir, filename, name, config):
        if not working_dir:
            raise ValueError("No working_dir for ServiceConfig.")

        return cls(
            os.path.abspath(working_dir),
            os.path.abspath(filename) if filename else filename,
            name,
            config)


def find(base_dir, filenames):
    if filenames == ['-']:
        return ConfigDetails(
            os.getcwd(),
            [ConfigFile(None, yaml.safe_load(sys.stdin))])

    if filenames:
        filenames = [os.path.join(base_dir, f) for f in filenames]
    else:
        filenames = get_default_config_files(base_dir)

    log.debug("Using configuration files: {}".format(",".join(filenames)))
    return ConfigDetails(
        os.path.dirname(filenames[0]),
        [ConfigFile.from_filename(f) for f in filenames])


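# A hedged sketch of how find() and load() are typically combined by a caller.
# The paths and service names here are hypothetical, not taken from this module:
#
#     details = find('.', ['docker-compose.yml', 'docker-compose.override.yml'])
#     config = load(details)
#     [s['name'] for s in config.services]   # e.g. ['web', 'db']

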
def validate_config_version(config_files):
    main_file = config_files[0]
    validate_top_level_object(main_file)
    for next_file in config_files[1:]:
        validate_top_level_object(next_file)

        if main_file.version != next_file.version:
            raise ConfigurationError(
                "Version mismatch: file {0} specifies version {1} but "
                "extension file {2} uses version {3}".format(
                    main_file.filename,
                    main_file.version,
                    next_file.filename,
                    next_file.version))


def get_default_config_files(base_dir):
    (candidates, path) = find_candidates_in_parent_dirs(SUPPORTED_FILENAMES, base_dir)

    if not candidates:
        raise ComposeFileNotFound(SUPPORTED_FILENAMES)

    winner = candidates[0]

    if len(candidates) > 1:
        log.warn("Found multiple config files with supported names: %s", ", ".join(candidates))
        log.warn("Using %s\n", winner)

    return [os.path.join(path, winner)] + get_default_override_file(path)


def get_default_override_file(path):
    override_filename = os.path.join(path, DEFAULT_OVERRIDE_FILENAME)
    return [override_filename] if os.path.exists(override_filename) else []


def find_candidates_in_parent_dirs(filenames, path):
    """
    Given a directory path to start, looks for filenames in the
    directory, and then each parent directory successively,
    until found.

    Returns tuple (candidates, path).
    """
    candidates = [filename for filename in filenames
                  if os.path.exists(os.path.join(path, filename))]

    if not candidates:
        parent_dir = os.path.join(path, '..')
        if os.path.abspath(parent_dir) != os.path.abspath(path):
            return find_candidates_in_parent_dirs(filenames, parent_dir)

    return (candidates, path)


def load(config_details):
    """Load the configuration from a working directory and a list of
    configuration files. Files are loaded in order, and merged on top
    of each other to create the final configuration.

    Return a fully interpolated, extended and validated configuration.
    """
    validate_config_version(config_details.config_files)

    processed_files = [
        process_config_file(config_file)
        for config_file in config_details.config_files
    ]
    config_details = config_details._replace(config_files=processed_files)

    main_file = config_details.config_files[0]
    volumes = load_mapping(
        config_details.config_files, 'get_volumes', 'Volume'
    )
    networks = load_mapping(
        config_details.config_files, 'get_networks', 'Network'
    )
    service_dicts = load_services(
        config_details.working_dir,
        main_file,
        [file.get_service_dicts() for file in config_details.config_files])

    if main_file.version != V1:
        for service_dict in service_dicts:
            match_named_volumes(service_dict, volumes)

    return Config(main_file.version, service_dicts, volumes, networks)


def load_mapping(config_files, get_func, entity_type):
    mapping = {}

    for config_file in config_files:
        for name, config in getattr(config_file, get_func)().items():
            mapping[name] = config or {}
            if not config:
                continue

            external = config.get('external')
            if external:
                if len(config.keys()) > 1:
                    raise ConfigurationError(
                        '{} {} declared as external but specifies'
                        ' additional attributes ({}). '.format(
                            entity_type,
                            name,
                            ', '.join([k for k in config.keys() if k != 'external'])
                        )
                    )
                if isinstance(external, dict):
                    config['external_name'] = external.get('name')
                else:
                    config['external_name'] = name

            mapping[name] = config

            if 'driver_opts' in config:
                config['driver_opts'] = build_string_dict(
                    config['driver_opts']
                )

    return mapping


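# For illustration only: given a v2 top-level `volumes` section such as
#
#     volumes:
#       data: {}
#       shared:
#         external:
#           name: company-shared
#
# load_mapping() produces {'data': {}, 'shared': {'external': {...},
# 'external_name': 'company-shared'}}. Declaring `external` together with any
# other attribute raises ConfigurationError, as implemented above. The volume
# names here are hypothetical.

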
def load_services(working_dir, config_file, service_configs):
    def build_service(service_name, service_dict, service_names):
        service_config = ServiceConfig.with_abs_paths(
            working_dir,
            config_file.filename,
            service_name,
            service_dict)
        resolver = ServiceExtendsResolver(service_config, config_file)
        service_dict = process_service(resolver.run())

        service_config = service_config._replace(config=service_dict)
        validate_service(service_config, service_names, config_file.version)
        service_dict = finalize_service(
            service_config,
            service_names,
            config_file.version)
        return service_dict

    def build_services(service_config):
        service_names = service_config.keys()
        return sort_service_dicts([
            build_service(name, service_dict, service_names)
            for name, service_dict in service_config.items()
        ])

    def merge_services(base, override):
        all_service_names = set(base) | set(override)
        return {
            name: merge_service_dicts_from_files(
                base.get(name, {}),
                override.get(name, {}),
                config_file.version)
            for name in all_service_names
        }

    service_config = service_configs[0]
    for next_config in service_configs[1:]:
        service_config = merge_services(service_config, next_config)

    return build_services(service_config)


def interpolate_config_section(filename, config, section):
    validate_config_section(filename, config, section)
    return interpolate_environment_variables(config, section)


def process_config_file(config_file, service_name=None):
    services = interpolate_config_section(
        config_file.filename,
        config_file.get_service_dicts(),
        'service')

    if config_file.version == V2_0:
        processed_config = dict(config_file.config)
        processed_config['services'] = services
        processed_config['volumes'] = interpolate_config_section(
            config_file.filename,
            config_file.get_volumes(),
            'volume')
        processed_config['networks'] = interpolate_config_section(
            config_file.filename,
            config_file.get_networks(),
            'network')

    if config_file.version == V1:
        processed_config = services

    config_file = config_file._replace(config=processed_config)
    validate_against_config_schema(config_file)

    if service_name and service_name not in services:
        raise ConfigurationError(
            "Cannot extend service '{}' in {}: Service not found".format(
                service_name, config_file.filename))

    return config_file


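# Illustrative note (assuming the ${VAR} substitution implemented in the
# sibling .interpolation module): process_config_file() interpolates each
# section from the caller's environment before schema validation, so with
# TAG=1.9 exported a hypothetical service entry like
#
#     web:
#       image: "repo/webapp:${TAG}"
#
# leaves this step with its image set to "repo/webapp:1.9".

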
class ServiceExtendsResolver(object):
    def __init__(self, service_config, config_file, already_seen=None):
        self.service_config = service_config
        self.working_dir = service_config.working_dir
        self.already_seen = already_seen or []
        self.config_file = config_file

    @property
    def signature(self):
        return self.service_config.filename, self.service_config.name

    def detect_cycle(self):
        if self.signature in self.already_seen:
            raise CircularReference(self.already_seen + [self.signature])

    def run(self):
        self.detect_cycle()

        if 'extends' in self.service_config.config:
            service_dict = self.resolve_extends(*self.validate_and_construct_extends())
            return self.service_config._replace(config=service_dict)

        return self.service_config

    def validate_and_construct_extends(self):
        extends = self.service_config.config['extends']
        if not isinstance(extends, dict):
            extends = {'service': extends}

        config_path = self.get_extended_config_path(extends)
        service_name = extends['service']

        extends_file = ConfigFile.from_filename(config_path)
        validate_config_version([self.config_file, extends_file])
        extended_file = process_config_file(
            extends_file,
            service_name=service_name)
        service_config = extended_file.get_service(service_name)

        return config_path, service_config, service_name

    def resolve_extends(self, extended_config_path, service_dict, service_name):
        resolver = ServiceExtendsResolver(
            ServiceConfig.with_abs_paths(
                os.path.dirname(extended_config_path),
                extended_config_path,
                service_name,
                service_dict),
            self.config_file,
            already_seen=self.already_seen + [self.signature])

        service_config = resolver.run()
        other_service_dict = process_service(service_config)
        validate_extended_service_dict(
            other_service_dict,
            extended_config_path,
            service_name)

        return merge_service_dicts(
            other_service_dict,
            self.service_config.config,
            self.config_file.version)

    def get_extended_config_path(self, extends_options):
        """The service we are extending either has a value for 'file' set,
        which we need to obtain a full path to, or we are extending from a
        service defined in our own file.
        """
        filename = self.service_config.filename
        validate_extends_file_path(
            self.service_config.name,
            extends_options,
            filename)
        if 'file' in extends_options:
            return expand_path(self.working_dir, extends_options['file'])
        return filename


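# A minimal sketch of the extends resolution handled by the class above, using
# hypothetical files. Given this service definition:
#
#     web:
#       extends:
#         file: common.yml
#         service: base
#       command: python app.py
#
# the resolver loads `base` from common.yml (recursively resolving its own
# `extends`, raising CircularReference on cycles), checks that the extended
# service does not use links/volumes_from/depends_on, and merges the local
# keys on top via merge_service_dicts().

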
def resolve_environment(service_dict):
    """Unpack any environment variables from an env_file, if set.
    Interpolate environment values if set.
    """
    env = {}
    for env_file in service_dict.get('env_file', []):
        env.update(env_vars_from_file(env_file))

    env.update(parse_environment(service_dict.get('environment')))
    return dict(resolve_env_var(k, v) for k, v in six.iteritems(env))


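# Illustrative precedence (hypothetical values): entries from `environment`
# override entries read from `env_file`, and a key given without a value
# (e.g. "SECRET") is resolved from os.environ at load time, or None if unset.
#
#     service_dict = {'env_file': ['./app.env'], 'environment': ['DEBUG=1', 'SECRET']}
#     resolve_environment(service_dict)
#     # -> {'DEBUG': '1', 'SECRET': <from os.environ or None>, ...}

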
def resolve_build_args(build):
    args = parse_build_arguments(build.get('args'))
    return dict(resolve_env_var(k, v) for k, v in six.iteritems(args))


def validate_extended_service_dict(service_dict, filename, service):
    error_prefix = "Cannot extend service '%s' in %s:" % (service, filename)

    if 'links' in service_dict:
        raise ConfigurationError(
            "%s services with 'links' cannot be extended" % error_prefix)

    if 'volumes_from' in service_dict:
        raise ConfigurationError(
            "%s services with 'volumes_from' cannot be extended" % error_prefix)

    if 'net' in service_dict:
        if get_container_name_from_network_mode(service_dict['net']):
            raise ConfigurationError(
                "%s services with 'net: container' cannot be extended" % error_prefix)

    if 'network_mode' in service_dict:
        if get_service_name_from_network_mode(service_dict['network_mode']):
            raise ConfigurationError(
                "%s services with 'network_mode: service' cannot be extended" % error_prefix)

    if 'depends_on' in service_dict:
        raise ConfigurationError(
            "%s services with 'depends_on' cannot be extended" % error_prefix)


def validate_service(service_config, service_names, version):
    service_dict, service_name = service_config.config, service_config.name
    validate_service_constraints(service_dict, service_name, version)
    validate_paths(service_dict)

    validate_ulimits(service_config)
    validate_network_mode(service_config, service_names)
    validate_depends_on(service_config, service_names)

    if not service_dict.get('image') and has_uppercase(service_name):
        raise ConfigurationError(
            "Service '{name}' contains uppercase characters which are not valid "
            "as part of an image name. Either use a lowercase service name or "
            "use the `image` field to set a custom name for the service image."
            .format(name=service_name))


def process_service(service_config):
    working_dir = service_config.working_dir
    service_dict = dict(service_config.config)

    if 'env_file' in service_dict:
        service_dict['env_file'] = [
            expand_path(working_dir, path)
            for path in to_list(service_dict['env_file'])
        ]

    if 'build' in service_dict:
        if isinstance(service_dict['build'], six.string_types):
            service_dict['build'] = resolve_build_path(working_dir, service_dict['build'])
        elif isinstance(service_dict['build'], dict) and 'context' in service_dict['build']:
            path = service_dict['build']['context']
            service_dict['build']['context'] = resolve_build_path(working_dir, path)

    if 'volumes' in service_dict and service_dict.get('volume_driver') is None:
        service_dict['volumes'] = resolve_volume_paths(working_dir, service_dict)

    if 'labels' in service_dict:
        service_dict['labels'] = parse_labels(service_dict['labels'])

    if 'extra_hosts' in service_dict:
        service_dict['extra_hosts'] = parse_extra_hosts(service_dict['extra_hosts'])

    for field in ['dns', 'dns_search', 'tmpfs']:
        if field in service_dict:
            service_dict[field] = to_list(service_dict[field])

    return service_dict


def finalize_service(service_config, service_names, version):
    service_dict = dict(service_config.config)

    if 'environment' in service_dict or 'env_file' in service_dict:
        service_dict['environment'] = resolve_environment(service_dict)
        service_dict.pop('env_file', None)

    if 'volumes_from' in service_dict:
        service_dict['volumes_from'] = [
            VolumeFromSpec.parse(vf, service_names, version)
            for vf in service_dict['volumes_from']
        ]

    if 'volumes' in service_dict:
        service_dict['volumes'] = [
            VolumeSpec.parse(v) for v in service_dict['volumes']]

    if 'net' in service_dict:
        network_mode = service_dict.pop('net')
        container_name = get_container_name_from_network_mode(network_mode)
        if container_name and container_name in service_names:
            service_dict['network_mode'] = 'service:{}'.format(container_name)
        else:
            service_dict['network_mode'] = network_mode

    if 'networks' in service_dict:
        service_dict['networks'] = parse_networks(service_dict['networks'])

    if 'restart' in service_dict:
        service_dict['restart'] = parse_restart_spec(service_dict['restart'])

    normalize_build(service_dict, service_config.working_dir)

    service_dict['name'] = service_config.name
    return normalize_v1_service_format(service_dict)


def normalize_v1_service_format(service_dict):
    if 'log_driver' in service_dict or 'log_opt' in service_dict:
        if 'logging' not in service_dict:
            service_dict['logging'] = {}
        if 'log_driver' in service_dict:
            service_dict['logging']['driver'] = service_dict['log_driver']
            del service_dict['log_driver']
        if 'log_opt' in service_dict:
            service_dict['logging']['options'] = service_dict['log_opt']
            del service_dict['log_opt']

    if 'dockerfile' in service_dict:
        service_dict['build'] = service_dict.get('build', {})
        service_dict['build'].update({
            'dockerfile': service_dict.pop('dockerfile')
        })

    return service_dict


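# Example of the v1 normalization above (illustrative values): legacy
# `log_driver`/`log_opt` keys are folded into a v2-style `logging` mapping,
# and a bare `dockerfile` key is moved under `build`.
#
#     normalize_v1_service_format({'log_driver': 'syslog', 'log_opt': {'tag': 'web'}})
#     # -> {'logging': {'driver': 'syslog', 'options': {'tag': 'web'}}}

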
def merge_service_dicts_from_files(base, override, version):
    """When merging services from multiple files we need to merge the
    `extends` field. This is not handled by `merge_service_dicts()`, which
    is also used to perform the `extends` merge itself.
    """
    new_service = merge_service_dicts(base, override, version)
    if 'extends' in override:
        new_service['extends'] = override['extends']
    elif 'extends' in base:
        new_service['extends'] = base['extends']
    return new_service


class MergeDict(dict):
    """A dict-like object responsible for merging two dicts into one."""

    def __init__(self, base, override):
        self.base = base
        self.override = override

    def needs_merge(self, field):
        return field in self.base or field in self.override

    def merge_field(self, field, merge_func, default=None):
        if not self.needs_merge(field):
            return

        self[field] = merge_func(
            self.base.get(field, default),
            self.override.get(field, default))

    def merge_mapping(self, field, parse_func):
        if not self.needs_merge(field):
            return

        self[field] = parse_func(self.base.get(field))
        self[field].update(parse_func(self.override.get(field)))

    def merge_sequence(self, field, parse_func):
        def parse_sequence_func(seq):
            return to_mapping((parse_func(item) for item in seq), 'merge_field')

        if not self.needs_merge(field):
            return

        merged = parse_sequence_func(self.base.get(field, []))
        merged.update(parse_sequence_func(self.override.get(field, [])))
        self[field] = [item.repr() for item in merged.values()]

    def merge_scalar(self, field):
        if self.needs_merge(field):
            self[field] = self.override.get(field, self.base.get(field))


def merge_service_dicts(base, override, version):
    md = MergeDict(base, override)

    md.merge_mapping('environment', parse_environment)
    md.merge_mapping('labels', parse_labels)
    md.merge_mapping('ulimits', parse_ulimits)
    md.merge_mapping('networks', parse_networks)
    md.merge_sequence('links', ServiceLink.parse)

    for field in ['volumes', 'devices']:
        md.merge_field(field, merge_path_mappings)

    for field in [
        'depends_on',
        'expose',
        'external_links',
        'ports',
        'volumes_from',
    ]:
        md.merge_field(field, operator.add, default=[])

    for field in ['dns', 'dns_search', 'env_file', 'tmpfs']:
        md.merge_field(field, merge_list_or_string)

    for field in set(ALLOWED_KEYS) - set(md):
        md.merge_scalar(field)

    if version == V1:
        legacy_v1_merge_image_or_build(md, base, override)
    elif md.needs_merge('build'):
        md['build'] = merge_build(md, base, override)

    return dict(md)


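# A short, hypothetical illustration of the merge rules above: mappings such as
# `environment` and `labels` are merged key-by-key (override wins), list fields
# such as `ports` are concatenated, and plain scalars are simply overridden.
#
#     base = {'image': 'repo/web:1.0', 'ports': ['80'], 'environment': {'A': '1', 'B': '2'}}
#     override = {'image': 'repo/web:2.0', 'ports': ['443'], 'environment': {'B': '3'}}
#     merge_service_dicts(base, override, V2_0)
#     # -> {'image': 'repo/web:2.0', 'ports': ['80', '443'],
#     #     'environment': {'A': '1', 'B': '3'}}

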
def merge_build(output, base, override):
    def to_dict(service):
        build_config = service.get('build', {})
        if isinstance(build_config, six.string_types):
            return {'context': build_config}
        return build_config

    md = MergeDict(to_dict(base), to_dict(override))
    md.merge_scalar('context')
    md.merge_scalar('dockerfile')
    md.merge_mapping('args', parse_build_arguments)
    return dict(md)


def legacy_v1_merge_image_or_build(output, base, override):
    output.pop('image', None)
    output.pop('build', None)
    if 'image' in override:
        output['image'] = override['image']
    elif 'build' in override:
        output['build'] = override['build']
    elif 'image' in base:
        output['image'] = base['image']
    elif 'build' in base:
        output['build'] = base['build']


def merge_environment(base, override):
    env = parse_environment(base)
    env.update(parse_environment(override))
    return env


def split_env(env):
    if isinstance(env, six.binary_type):
        env = env.decode('utf-8', 'replace')
    if '=' in env:
        return env.split('=', 1)
    else:
        return env, None


def split_label(label):
    if '=' in label:
        return label.split('=', 1)
    else:
        return label, ''


def parse_dict_or_list(split_func, type_name, arguments):
    if not arguments:
        return {}

    if isinstance(arguments, list):
        return dict(split_func(e) for e in arguments)

    if isinstance(arguments, dict):
        return dict(arguments)

    raise ConfigurationError(
        "%s \"%s\" must be a list or mapping." %
        (type_name, arguments)
    )


parse_build_arguments = functools.partial(parse_dict_or_list, split_env, 'build arguments')
parse_environment = functools.partial(parse_dict_or_list, split_env, 'environment')
parse_labels = functools.partial(parse_dict_or_list, split_label, 'labels')
parse_networks = functools.partial(parse_dict_or_list, lambda k: (k, None), 'networks')


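# The parsers above accept either list or mapping syntax from the YAML file.
# Purely illustrative values:
#
#     parse_environment(['DEBUG=1', 'SECRET'])    # -> {'DEBUG': '1', 'SECRET': None}
#     parse_environment({'DEBUG': '1'})           # -> {'DEBUG': '1'}
#     parse_labels(['com.example.role=web'])      # -> {'com.example.role': 'web'}

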
def parse_ulimits(ulimits):
    if not ulimits:
        return {}

    if isinstance(ulimits, dict):
        return dict(ulimits)


def resolve_env_var(key, val):
    if val is not None:
        return key, val
    elif key in os.environ:
        return key, os.environ[key]
    else:
        return key, None


def env_vars_from_file(filename):
    """
    Read in a line-delimited file of environment variables.
    """
    if not os.path.exists(filename):
        raise ConfigurationError("Couldn't find env file: %s" % filename)
    env = {}
    for line in codecs.open(filename, 'r', 'utf-8'):
        line = line.strip()
        if line and not line.startswith('#'):
            k, v = split_env(line)
            env[k] = v
    return env


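# Expected env_file format (illustrative): one KEY=value per line; blank lines
# and lines starting with '#' are ignored, and a bare KEY is later resolved
# from the caller's environment by resolve_env_var().
#
#     # app.env
#     DEBUG=1
#     API_KEY

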
def resolve_volume_paths(working_dir, service_dict):
    return [
        resolve_volume_path(working_dir, volume)
        for volume in service_dict['volumes']
    ]


def resolve_volume_path(working_dir, volume):
    container_path, host_path = split_path_mapping(volume)

    if host_path is not None:
        if host_path.startswith('.'):
            host_path = expand_path(working_dir, host_path)
        host_path = os.path.expanduser(host_path)
        return u"{}:{}".format(host_path, container_path)
    else:
        return container_path


def normalize_build(service_dict, working_dir):

    if 'build' in service_dict:
        build = {}
        # Shortcut where specifying a string is treated as the build context
        if isinstance(service_dict['build'], six.string_types):
            build['context'] = service_dict.pop('build')
        else:
            build.update(service_dict['build'])
            if 'args' in build:
                build['args'] = build_string_dict(resolve_build_args(build))

        service_dict['build'] = build


def resolve_build_path(working_dir, build_path):
    if is_url(build_path):
        return build_path
    return expand_path(working_dir, build_path)


def is_url(build_path):
    return build_path.startswith(DOCKER_VALID_URL_PREFIXES)


def validate_paths(service_dict):
    if 'build' in service_dict:
        build = service_dict.get('build', {})

        if isinstance(build, six.string_types):
            build_path = build
        elif isinstance(build, dict) and 'context' in build:
            build_path = build['context']
        else:
            # We have a build section but no context, so nothing to validate
            return

        if (
            not is_url(build_path) and
            (not os.path.exists(build_path) or not os.access(build_path, os.R_OK))
        ):
            raise ConfigurationError(
                "build path %s either does not exist, is not accessible, "
                "or is not a valid URL." % build_path)


def merge_path_mappings(base, override):
    d = dict_from_path_mappings(base)
    d.update(dict_from_path_mappings(override))
    return path_mappings_from_dict(d)


def dict_from_path_mappings(path_mappings):
    if path_mappings:
        return dict(split_path_mapping(v) for v in path_mappings)
    else:
        return {}


def path_mappings_from_dict(d):
    return [join_path_mapping(v) for v in d.items()]


def split_path_mapping(volume_path):
    """
    Ascertain if the volume_path contains a host path as well as a container
    path. Using splitdrive so Windows absolute paths won't cause issues with
    splitting on ':'.
    """
    # splitdrive has limitations when it comes to relative paths, so when the
    # path is relative, handle the special case and set the drive to ''
    if volume_path.startswith('.') or volume_path.startswith('~'):
        drive, volume_config = '', volume_path
    else:
        drive, volume_config = os.path.splitdrive(volume_path)

    if ':' in volume_config:
        (host, container) = volume_config.split(':', 1)
        return (container, drive + host)
    else:
        return (volume_path, None)


def join_path_mapping(pair):
    (container, host) = pair
    if host is None:
        return container
    else:
        return ":".join((host, container))


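# Round-trip behavior of the helpers above (hypothetical paths): volume strings
# are split into (container, host) pairs keyed by container path, so that an
# override file can replace the host side of the same mount.
#
#     split_path_mapping('./data:/var/lib/data')      # -> ('/var/lib/data', './data')
#     split_path_mapping('/var/lib/data')             # -> ('/var/lib/data', None)
#     join_path_mapping(('/var/lib/data', './data'))  # -> './data:/var/lib/data'

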
def expand_path(working_dir, path):
    return os.path.abspath(os.path.join(working_dir, os.path.expanduser(path)))


def merge_list_or_string(base, override):
    return to_list(base) + to_list(override)


def to_list(value):
    if value is None:
        return []
    elif isinstance(value, six.string_types):
        return [value]
    else:
        return value


def to_mapping(sequence, key_field):
    return {getattr(item, key_field): item for item in sequence}


def has_uppercase(name):
    return any(char in string.ascii_uppercase for char in name)


def load_yaml(filename):
    try:
        with open(filename, 'r') as fh:
            return yaml.safe_load(fh)
    except (IOError, yaml.YAMLError) as e:
        error_name = getattr(e, '__module__', '') + '.' + e.__class__.__name__
        raise ConfigurationError(u"{}: {}".format(error_name, e))