config.py 50 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534
  1. from __future__ import absolute_import
  2. from __future__ import unicode_literals
  3. import functools
  4. import io
  5. import logging
  6. import os
  7. import re
  8. import string
  9. import sys
  10. from collections import namedtuple
  11. from operator import attrgetter
  12. import six
  13. import yaml
  14. from cached_property import cached_property
  15. from . import types
  16. from .. import const
  17. from ..const import COMPOSEFILE_V1 as V1
  18. from ..const import COMPOSEFILE_V2_1 as V2_1
  19. from ..const import COMPOSEFILE_V2_3 as V2_3
  20. from ..const import COMPOSEFILE_V3_0 as V3_0
  21. from ..const import COMPOSEFILE_V3_4 as V3_4
  22. from ..utils import build_string_dict
  23. from ..utils import json_hash
  24. from ..utils import parse_bytes
  25. from ..utils import parse_nanoseconds_int
  26. from ..utils import splitdrive
  27. from ..version import ComposeVersion
  28. from .environment import env_vars_from_file
  29. from .environment import Environment
  30. from .environment import split_env
  31. from .errors import CircularReference
  32. from .errors import ComposeFileNotFound
  33. from .errors import ConfigurationError
  34. from .errors import DuplicateOverrideFileFound
  35. from .errors import VERSION_EXPLANATION
  36. from .interpolation import interpolate_environment_variables
  37. from .sort_services import get_container_name_from_network_mode
  38. from .sort_services import get_service_name_from_network_mode
  39. from .sort_services import sort_service_dicts
  40. from .types import MountSpec
  41. from .types import parse_extra_hosts
  42. from .types import parse_restart_spec
  43. from .types import SecurityOpt
  44. from .types import ServiceLink
  45. from .types import ServicePort
  46. from .types import VolumeFromSpec
  47. from .types import VolumeSpec
  48. from .validation import match_named_volumes
  49. from .validation import validate_against_config_schema
  50. from .validation import validate_config_section
  51. from .validation import validate_cpu
  52. from .validation import validate_credential_spec
  53. from .validation import validate_depends_on
  54. from .validation import validate_extends_file_path
  55. from .validation import validate_healthcheck
  56. from .validation import validate_links
  57. from .validation import validate_network_mode
  58. from .validation import validate_pid_mode
  59. from .validation import validate_service_constraints
  60. from .validation import validate_top_level_object
  61. from .validation import validate_ulimits
# Service configuration keys that map directly onto options understood by
# the Docker Engine's container-create API.
DOCKER_CONFIG_KEYS = [
    'cap_add',
    'cap_drop',
    'cgroup_parent',
    'command',
    'cpu_count',
    'cpu_percent',
    'cpu_period',
    'cpu_quota',
    'cpu_rt_period',
    'cpu_rt_runtime',
    'cpu_shares',
    'cpus',
    'cpuset',
    'detach',
    'device_cgroup_rules',
    'devices',
    'dns',
    'dns_search',
    'dns_opt',
    'domainname',
    'entrypoint',
    'env_file',
    'environment',
    'extra_hosts',
    'group_add',
    'hostname',
    'healthcheck',
    'image',
    'ipc',
    'isolation',
    'labels',
    'links',
    'mac_address',
    'mem_limit',
    'mem_reservation',
    'memswap_limit',
    'mem_swappiness',
    'net',
    'oom_score_adj',
    'oom_kill_disable',
    'pid',
    'ports',
    'privileged',
    'read_only',
    'restart',
    'runtime',
    'secrets',
    'security_opt',
    'shm_size',
    'pids_limit',
    'stdin_open',
    'stop_signal',
    'sysctls',
    'tty',
    'user',
    'userns_mode',
    'volume_driver',
    'volumes',
    'volumes_from',
    'working_dir',
]

# Keys Compose accepts in a service definition: the engine keys above plus
# Compose-specific keys handled by Compose itself.
ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
    'blkio_config',
    'build',
    'container_name',
    'credential_spec',
    'dockerfile',
    'init',
    'log_driver',
    'log_opt',
    'logging',
    'network_mode',
    'platform',
    'scale',
    'stop_grace_period',
]

# A build context starting with one of these prefixes is treated as a
# remote URL rather than a local path.
DOCKER_VALID_URL_PREFIXES = (
    'http://',
    'https://',
    'git://',
    'github.com/',
    'git@',
)

# Filenames probed, in order, when no explicit config file is given.
SUPPORTED_FILENAMES = [
    'docker-compose.yml',
    'docker-compose.yaml',
]

# Override files merged on top of the main file when found next to it.
DEFAULT_OVERRIDE_FILENAMES = ('docker-compose.override.yml', 'docker-compose.override.yaml')

log = logging.getLogger(__name__)
  152. class ConfigDetails(namedtuple('_ConfigDetails', 'working_dir config_files environment')):
  153. """
  154. :param working_dir: the directory to use for relative paths in the config
  155. :type working_dir: string
  156. :param config_files: list of configuration files to load
  157. :type config_files: list of :class:`ConfigFile`
  158. :param environment: computed environment values for this project
  159. :type environment: :class:`environment.Environment`
  160. """
  161. def __new__(cls, working_dir, config_files, environment=None):
  162. if environment is None:
  163. environment = Environment.from_env_file(working_dir)
  164. return super(ConfigDetails, cls).__new__(
  165. cls, working_dir, config_files, environment
  166. )
class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
    """
    :param filename: filename of the config file
    :type  filename: string
    :param config: contents of the config file
    :type  config: :class:`dict`
    """

    @classmethod
    def from_filename(cls, filename):
        """Parse the YAML file at ``filename`` into a ConfigFile."""
        return cls(filename, load_yaml(filename))

    @cached_property
    def version(self):
        """Return this file's compose-file version.

        A missing 'version' key means a legacy V1 file.  ``version: 1`` is
        rejected (V1 files must not declare a version), and bare '2' / '3'
        map to the lowest minor release of their series.
        """
        if 'version' not in self.config:
            return V1

        version = self.config['version']

        if isinstance(version, dict):
            # A mapping here means "version" is most likely a V1 service
            # named "version" -- warn and fall back to V1.
            log.warning('Unexpected type for "version" key in "{}". Assuming '
                        '"version" is the name of a service, and defaulting to '
                        'Compose file version 1.'.format(self.filename))
            return V1

        if not isinstance(version, six.string_types):
            raise ConfigurationError(
                'Version in "{}" is invalid - it should be a string.'
                .format(self.filename))

        if version == '1':
            raise ConfigurationError(
                'Version in "{}" is invalid. {}'
                .format(self.filename, VERSION_EXPLANATION)
            )

        # Accept e.g. "2", "3.4" -- a major of 2..9 with an optional minor.
        version_pattern = re.compile(r"^[2-9]+(\.\d+)?$")
        if not version_pattern.match(version):
            raise ConfigurationError(
                'Version "{}" in "{}" is invalid.'
                .format(version, self.filename))

        if version == '2':
            return const.COMPOSEFILE_V2_0

        if version == '3':
            return const.COMPOSEFILE_V3_0

        return ComposeVersion(version)

    def get_service(self, name):
        # Raises KeyError when `name` is not defined in this file.
        return self.get_service_dicts()[name]

    def get_service_dicts(self):
        # V1 files are a flat mapping of services; later formats nest them
        # under a top-level 'services' key.
        return self.config if self.version == V1 else self.config.get('services', {})

    def get_volumes(self):
        return {} if self.version == V1 else self.config.get('volumes', {})

    def get_networks(self):
        return {} if self.version == V1 else self.config.get('networks', {})

    def get_secrets(self):
        # Secrets were introduced in compose file format 3.1.
        return {} if self.version < const.COMPOSEFILE_V3_1 else self.config.get('secrets', {})

    def get_configs(self):
        # Configs were introduced in compose file format 3.3.
        return {} if self.version < const.COMPOSEFILE_V3_3 else self.config.get('configs', {})
class Config(namedtuple('_Config', 'version services volumes networks secrets configs')):
    """The fully loaded, merged and validated project configuration.

    :param version: configuration version
    :type  version: int
    :param services: List of service description dictionaries
    :type  services: :class:`list`
    :param volumes: Dictionary mapping volume names to description dictionaries
    :type  volumes: :class:`dict`
    :param networks: Dictionary mapping network names to description dictionaries
    :type  networks: :class:`dict`
    :param secrets: Dictionary mapping secret names to description dictionaries
    :type  secrets: :class:`dict`
    :param configs: Dictionary mapping config names to description dictionaries
    :type  configs: :class:`dict`
    """
  233. class ServiceConfig(namedtuple('_ServiceConfig', 'working_dir filename name config')):
  234. @classmethod
  235. def with_abs_paths(cls, working_dir, filename, name, config):
  236. if not working_dir:
  237. raise ValueError("No working_dir for ServiceConfig.")
  238. return cls(
  239. os.path.abspath(working_dir),
  240. os.path.abspath(filename) if filename else filename,
  241. name,
  242. config)
  243. def find(base_dir, filenames, environment, override_dir=None):
  244. if filenames == ['-']:
  245. return ConfigDetails(
  246. os.path.abspath(override_dir) if override_dir else os.getcwd(),
  247. [ConfigFile(None, yaml.safe_load(sys.stdin))],
  248. environment
  249. )
  250. if filenames:
  251. filenames = [os.path.join(base_dir, f) for f in filenames]
  252. else:
  253. filenames = get_default_config_files(base_dir)
  254. log.debug("Using configuration files: {}".format(",".join(filenames)))
  255. return ConfigDetails(
  256. override_dir if override_dir else os.path.dirname(filenames[0]),
  257. [ConfigFile.from_filename(f) for f in filenames],
  258. environment
  259. )
  260. def validate_config_version(config_files):
  261. main_file = config_files[0]
  262. validate_top_level_object(main_file)
  263. for next_file in config_files[1:]:
  264. validate_top_level_object(next_file)
  265. if main_file.version != next_file.version:
  266. raise ConfigurationError(
  267. "Version mismatch: file {0} specifies version {1} but "
  268. "extension file {2} uses version {3}".format(
  269. main_file.filename,
  270. main_file.version,
  271. next_file.filename,
  272. next_file.version))
  273. def get_default_config_files(base_dir):
  274. (candidates, path) = find_candidates_in_parent_dirs(SUPPORTED_FILENAMES, base_dir)
  275. if not candidates:
  276. raise ComposeFileNotFound(SUPPORTED_FILENAMES)
  277. winner = candidates[0]
  278. if len(candidates) > 1:
  279. log.warning("Found multiple config files with supported names: %s", ", ".join(candidates))
  280. log.warning("Using %s\n", winner)
  281. return [os.path.join(path, winner)] + get_default_override_file(path)
  282. def get_default_override_file(path):
  283. override_files_in_path = [os.path.join(path, override_filename) for override_filename
  284. in DEFAULT_OVERRIDE_FILENAMES
  285. if os.path.exists(os.path.join(path, override_filename))]
  286. if len(override_files_in_path) > 1:
  287. raise DuplicateOverrideFileFound(override_files_in_path)
  288. return override_files_in_path
  289. def find_candidates_in_parent_dirs(filenames, path):
  290. """
  291. Given a directory path to start, looks for filenames in the
  292. directory, and then each parent directory successively,
  293. until found.
  294. Returns tuple (candidates, path).
  295. """
  296. candidates = [filename for filename in filenames
  297. if os.path.exists(os.path.join(path, filename))]
  298. if not candidates:
  299. parent_dir = os.path.join(path, '..')
  300. if os.path.abspath(parent_dir) != os.path.abspath(path):
  301. return find_candidates_in_parent_dirs(filenames, parent_dir)
  302. return (candidates, path)
  303. def check_swarm_only_config(service_dicts, compatibility=False):
  304. warning_template = (
  305. "Some services ({services}) use the '{key}' key, which will be ignored. "
  306. "Compose does not support '{key}' configuration - use "
  307. "`docker stack deploy` to deploy to a swarm."
  308. )
  309. def check_swarm_only_key(service_dicts, key):
  310. services = [s for s in service_dicts if s.get(key)]
  311. if services:
  312. log.warning(
  313. warning_template.format(
  314. services=", ".join(sorted(s['name'] for s in services)),
  315. key=key
  316. )
  317. )
  318. if not compatibility:
  319. check_swarm_only_key(service_dicts, 'deploy')
  320. check_swarm_only_key(service_dicts, 'configs')
def load(config_details, compatibility=False, interpolate=True):
    """Load the configuration from a working directory and a list of
    configuration files.  Files are loaded in order, and merged on top
    of each other to create the final configuration.

    Return a fully interpolated, extended and validated configuration.

    :param config_details: :class:`ConfigDetails` describing the files to load
    :param compatibility: when True, a v3 main file is reported as V2_3 and
        swarm-only warnings are suppressed
    :param interpolate: substitute ${VAR} references from the environment
    """
    validate_config_version(config_details.config_files)

    processed_files = [
        process_config_file(config_file, config_details.environment, interpolate=interpolate)
        for config_file in config_details.config_files
    ]
    config_details = config_details._replace(config_files=processed_files)

    main_file = config_details.config_files[0]
    volumes = load_mapping(
        config_details.config_files, 'get_volumes', 'Volume'
    )
    networks = load_mapping(
        config_details.config_files, 'get_networks', 'Network'
    )
    # Secrets and configs may reference files relative to the working dir.
    secrets = load_mapping(
        config_details.config_files, 'get_secrets', 'Secret', config_details.working_dir
    )
    configs = load_mapping(
        config_details.config_files, 'get_configs', 'Config', config_details.working_dir
    )
    service_dicts = load_services(config_details, main_file, compatibility, interpolate=interpolate)

    if main_file.version != V1:
        # Named volumes only exist in v2+ formats.
        for service_dict in service_dicts:
            match_named_volumes(service_dict, volumes)

    check_swarm_only_config(service_dicts, compatibility)

    # In compatibility mode a v3 file is down-converted and reported as v2.3.
    version = V2_3 if compatibility and main_file.version >= V3_0 else main_file.version

    return Config(version, service_dicts, volumes, networks, secrets, configs)
def load_mapping(config_files, get_func, entity_type, working_dir=None):
    """Merge one top-level mapping (volumes/networks/secrets/configs) from
    all config files; later files override earlier ones per name.

    :param get_func: name of the ConfigFile accessor to call (e.g. 'get_volumes')
    :param entity_type: human-readable name used in error messages ('Volume', ...)
    :param working_dir: base for resolving 'file' entries (secrets/configs)
    """
    mapping = {}

    for config_file in config_files:
        for name, config in getattr(config_file, get_func)().items():
            mapping[name] = config or {}
            if not config:
                continue
            external = config.get('external')
            if external:
                validate_external(entity_type, name, config, config_file.version)
                if isinstance(external, dict):
                    # Long form: external: {name: ...} -- may be None.
                    config['name'] = external.get('name')
                elif not config.get('name'):
                    config['name'] = name

            if 'driver_opts' in config:
                config['driver_opts'] = build_string_dict(
                    config['driver_opts']
                )

            if 'labels' in config:
                config['labels'] = parse_labels(config['labels'])

            if 'file' in config:
                # Resolve the referenced file relative to the project dir.
                config['file'] = expand_path(working_dir, config['file'])

    return mapping
  376. def validate_external(entity_type, name, config, version):
  377. if (version < V2_1 or (version >= V3_0 and version < V3_4)) and len(config.keys()) > 1:
  378. raise ConfigurationError(
  379. "{} {} declared as external but specifies additional attributes "
  380. "({}).".format(
  381. entity_type, name, ', '.join(k for k in config if k != 'external')))
def load_services(config_details, config_file, compatibility=False, interpolate=True):
    """Build the final list of service dicts: merge the 'services' sections
    of all files, resolve `extends`, validate and finalize each service, and
    return them sorted in dependency order."""

    def build_service(service_name, service_dict, service_names):
        # Resolve extends, normalize paths/sections, validate, then finalize.
        service_config = ServiceConfig.with_abs_paths(
            config_details.working_dir,
            config_file.filename,
            service_name,
            service_dict)
        resolver = ServiceExtendsResolver(
            service_config, config_file, environment=config_details.environment
        )
        service_dict = process_service(resolver.run())

        service_config = service_config._replace(config=service_dict)
        validate_service(service_config, service_names, config_file)
        service_dict = finalize_service(
            service_config,
            service_names,
            config_file.version,
            config_details.environment,
            compatibility,
            interpolate
        )
        return service_dict

    def build_services(service_config):
        service_names = service_config.keys()
        return sort_service_dicts([
            build_service(name, service_dict, service_names)
            for name, service_dict in service_config.items()
        ])

    def merge_services(base, override):
        # Union of service names; each service merged key-by-key,
        # later files winning.
        all_service_names = set(base) | set(override)
        return {
            name: merge_service_dicts_from_files(
                base.get(name, {}),
                override.get(name, {}),
                config_file.version)
            for name in all_service_names
        }

    service_configs = [
        file.get_service_dicts() for file in config_details.config_files
    ]

    # Left-fold the per-file service mappings into one merged mapping.
    service_config = functools.reduce(merge_services, service_configs)

    return build_services(service_config)
  424. def interpolate_config_section(config_file, config, section, environment):
  425. return interpolate_environment_variables(
  426. config_file.version,
  427. config,
  428. section,
  429. environment
  430. )
  431. def process_config_section(config_file, config, section, environment, interpolate):
  432. validate_config_section(config_file.filename, config, section)
  433. if interpolate:
  434. return interpolate_environment_variables(
  435. config_file.version,
  436. config,
  437. section,
  438. environment
  439. )
  440. else:
  441. return config
def process_config_file(config_file, environment, service_name=None, interpolate=True):
    """Validate and interpolate every section of a config file.

    Returns a new ConfigFile with the processed contents.  When
    `service_name` is given (extends resolution), additionally verify that
    the named service exists in this file.

    :raises ConfigurationError: if `service_name` is not defined in the file
    """
    services = process_config_section(
        config_file,
        config_file.get_service_dicts(),
        'service',
        environment,
        interpolate,
    )

    if config_file.version > V1:
        processed_config = dict(config_file.config)
        processed_config['services'] = services
        processed_config['volumes'] = process_config_section(
            config_file,
            config_file.get_volumes(),
            'volume',
            environment,
            interpolate,
        )
        processed_config['networks'] = process_config_section(
            config_file,
            config_file.get_networks(),
            'network',
            environment,
            interpolate,
        )
        # Secrets exist from format 3.1, configs from 3.3.
        if config_file.version >= const.COMPOSEFILE_V3_1:
            processed_config['secrets'] = process_config_section(
                config_file,
                config_file.get_secrets(),
                'secret',
                environment,
                interpolate,
            )
        if config_file.version >= const.COMPOSEFILE_V3_3:
            processed_config['configs'] = process_config_section(
                config_file,
                config_file.get_configs(),
                'config',
                environment,
                interpolate,
            )
    else:
        # A V1 file is nothing but a flat mapping of services.
        processed_config = services

    config_file = config_file._replace(config=processed_config)
    validate_against_config_schema(config_file)

    if service_name and service_name not in services:
        raise ConfigurationError(
            "Cannot extend service '{}' in {}: Service not found".format(
                service_name, config_file.filename))

    return config_file
class ServiceExtendsResolver(object):
    """Recursively resolves a service's `extends` chain, merging the extended
    service's config underneath this one's and guarding against cycles."""

    def __init__(self, service_config, config_file, environment, already_seen=None):
        self.service_config = service_config
        self.working_dir = service_config.working_dir
        # Chain of (filename, service_name) signatures already visited,
        # used for circular-reference detection.
        self.already_seen = already_seen or []
        self.config_file = config_file
        self.environment = environment

    @property
    def signature(self):
        # Uniquely identifies a service within a particular file.
        return self.service_config.filename, self.service_config.name

    def detect_cycle(self):
        if self.signature in self.already_seen:
            raise CircularReference(self.already_seen + [self.signature])

    def run(self):
        """Return the service config with any `extends` fully resolved."""
        self.detect_cycle()

        if 'extends' in self.service_config.config:
            service_dict = self.resolve_extends(*self.validate_and_construct_extends())
            return self.service_config._replace(config=service_dict)

        return self.service_config

    def validate_and_construct_extends(self):
        """Normalize the `extends` value (short string form vs dict form) and
        load the extended service's config, from this file or another one.

        Returns (config_path, service_config, service_name).
        """
        extends = self.service_config.config['extends']
        if not isinstance(extends, dict):
            # Short form: `extends: <service>` within the same file.
            extends = {'service': extends}

        config_path = self.get_extended_config_path(extends)
        service_name = extends['service']

        if config_path == os.path.abspath(self.config_file.filename):
            try:
                service_config = self.config_file.get_service(service_name)
            except KeyError:
                raise ConfigurationError(
                    "Cannot extend service '{}' in {}: Service not found".format(
                        service_name, config_path)
                )
        else:
            # Extending from another file: load, version-check and process it.
            extends_file = ConfigFile.from_filename(config_path)
            validate_config_version([self.config_file, extends_file])
            extended_file = process_config_file(
                extends_file, self.environment, service_name=service_name
            )
            service_config = extended_file.get_service(service_name)

        return config_path, service_config, service_name

    def resolve_extends(self, extended_config_path, service_dict, service_name):
        """Recursively resolve the extended service, then merge this
        service's own config on top of it."""
        resolver = ServiceExtendsResolver(
            ServiceConfig.with_abs_paths(
                os.path.dirname(extended_config_path),
                extended_config_path,
                service_name,
                service_dict),
            self.config_file,
            already_seen=self.already_seen + [self.signature],
            environment=self.environment
        )

        service_config = resolver.run()
        other_service_dict = process_service(service_config)
        # Certain keys (links, volumes_from, depends_on, ...) may not be
        # inherited through extends.
        validate_extended_service_dict(
            other_service_dict,
            extended_config_path,
            service_name)

        return merge_service_dicts(
            other_service_dict,
            self.service_config.config,
            self.config_file.version)

    def get_extended_config_path(self, extends_options):
        """Service we are extending either has a value for 'file' set, which we
        need to obtain a full path too or we are extending from a service
        defined in our own file.
        """
        filename = self.service_config.filename
        validate_extends_file_path(
            self.service_config.name,
            extends_options,
            filename)
        if 'file' in extends_options:
            return expand_path(self.working_dir, extends_options['file'])
        return filename
  567. def resolve_environment(service_dict, environment=None, interpolate=True):
  568. """Unpack any environment variables from an env_file, if set.
  569. Interpolate environment values if set.
  570. """
  571. env = {}
  572. for env_file in service_dict.get('env_file', []):
  573. env.update(env_vars_from_file(env_file, interpolate))
  574. env.update(parse_environment(service_dict.get('environment')))
  575. return dict(resolve_env_var(k, v, environment) for k, v in six.iteritems(env))
  576. def resolve_build_args(buildargs, environment):
  577. args = parse_build_arguments(buildargs)
  578. return dict(resolve_env_var(k, v, environment) for k, v in six.iteritems(args))
  579. def validate_extended_service_dict(service_dict, filename, service):
  580. error_prefix = "Cannot extend service '%s' in %s:" % (service, filename)
  581. if 'links' in service_dict:
  582. raise ConfigurationError(
  583. "%s services with 'links' cannot be extended" % error_prefix)
  584. if 'volumes_from' in service_dict:
  585. raise ConfigurationError(
  586. "%s services with 'volumes_from' cannot be extended" % error_prefix)
  587. if 'net' in service_dict:
  588. if get_container_name_from_network_mode(service_dict['net']):
  589. raise ConfigurationError(
  590. "%s services with 'net: container' cannot be extended" % error_prefix)
  591. if 'network_mode' in service_dict:
  592. if get_service_name_from_network_mode(service_dict['network_mode']):
  593. raise ConfigurationError(
  594. "%s services with 'network_mode: service' cannot be extended" % error_prefix)
  595. if 'depends_on' in service_dict:
  596. raise ConfigurationError(
  597. "%s services with 'depends_on' cannot be extended" % error_prefix)
def validate_service(service_config, service_names, config_file):
    """Run every per-service validator against a fully-processed service.

    :param service_names: names of all services, for cross-service checks
        (links, depends_on, network/pid modes)
    :raises ConfigurationError: on any constraint violation
    """
    service_dict, service_name = service_config.config, service_config.name
    validate_service_constraints(service_dict, service_name, config_file)
    validate_paths(service_dict)

    validate_cpu(service_config)
    validate_ulimits(service_config)
    validate_network_mode(service_config, service_names)
    validate_pid_mode(service_config, service_names)
    validate_depends_on(service_config, service_names)
    validate_links(service_config, service_names)
    validate_healthcheck(service_config)
    validate_credential_spec(service_config)

    if not service_dict.get('image') and has_uppercase(service_name):
        # Without an explicit image, the service name becomes part of the
        # image name, and image names must be lowercase.
        raise ConfigurationError(
            "Service '{name}' contains uppercase characters which are not valid "
            "as part of an image name. Either use a lowercase service name or "
            "use the `image` field to set a custom name for the service image."
            .format(name=service_name))
def process_service(service_config):
    """Normalize one service dict: expand relative paths and parse the
    string/short forms of sections into their canonical structures.

    Operates on a shallow copy of `service_config.config` and returns it.
    """
    working_dir = service_config.working_dir
    service_dict = dict(service_config.config)

    if 'env_file' in service_dict:
        service_dict['env_file'] = [
            expand_path(working_dir, path)
            for path in to_list(service_dict['env_file'])
        ]

    if 'build' in service_dict:
        process_build_section(service_dict, working_dir)

    if 'volumes' in service_dict and service_dict.get('volume_driver') is None:
        # With a custom volume driver the host part is driver-specific, so
        # volume paths are left untouched.
        service_dict['volumes'] = resolve_volume_paths(working_dir, service_dict)

    if 'sysctls' in service_dict:
        service_dict['sysctls'] = build_string_dict(parse_sysctls(service_dict['sysctls']))

    if 'labels' in service_dict:
        service_dict['labels'] = parse_labels(service_dict['labels'])

    service_dict = process_depends_on(service_dict)

    # These keys accept a scalar or a list; normalize to a list.
    for field in ['dns', 'dns_search', 'tmpfs']:
        if field in service_dict:
            service_dict[field] = to_list(service_dict[field])

    service_dict = process_security_opt(process_blkio_config(process_ports(
        process_healthcheck(service_dict)
    )))

    return service_dict
  640. def process_build_section(service_dict, working_dir):
  641. if isinstance(service_dict['build'], six.string_types):
  642. service_dict['build'] = resolve_build_path(working_dir, service_dict['build'])
  643. elif isinstance(service_dict['build'], dict):
  644. if 'context' in service_dict['build']:
  645. path = service_dict['build']['context']
  646. service_dict['build']['context'] = resolve_build_path(working_dir, path)
  647. if 'labels' in service_dict['build']:
  648. service_dict['build']['labels'] = parse_labels(service_dict['build']['labels'])
  649. def process_ports(service_dict):
  650. if 'ports' not in service_dict:
  651. return service_dict
  652. ports = []
  653. for port_definition in service_dict['ports']:
  654. if isinstance(port_definition, ServicePort):
  655. ports.append(port_definition)
  656. else:
  657. ports.extend(ServicePort.parse(port_definition))
  658. service_dict['ports'] = ports
  659. return service_dict
  660. def process_depends_on(service_dict):
  661. if 'depends_on' in service_dict and not isinstance(service_dict['depends_on'], dict):
  662. service_dict['depends_on'] = dict([
  663. (svc, {'condition': 'service_started'}) for svc in service_dict['depends_on']
  664. ])
  665. return service_dict
  666. def process_blkio_config(service_dict):
  667. if not service_dict.get('blkio_config'):
  668. return service_dict
  669. for field in ['device_read_bps', 'device_write_bps']:
  670. if field in service_dict['blkio_config']:
  671. for v in service_dict['blkio_config'].get(field, []):
  672. rate = v.get('rate', 0)
  673. v['rate'] = parse_bytes(rate)
  674. if v['rate'] is None:
  675. raise ConfigurationError('Invalid format for bytes value: "{}"'.format(rate))
  676. for field in ['device_read_iops', 'device_write_iops']:
  677. if field in service_dict['blkio_config']:
  678. for v in service_dict['blkio_config'].get(field, []):
  679. try:
  680. v['rate'] = int(v.get('rate', 0))
  681. except ValueError:
  682. raise ConfigurationError(
  683. 'Invalid IOPS value: "{}". Must be a positive integer.'.format(v.get('rate'))
  684. )
  685. return service_dict
  686. def process_healthcheck(service_dict):
  687. if 'healthcheck' not in service_dict:
  688. return service_dict
  689. hc = service_dict['healthcheck']
  690. if 'disable' in hc:
  691. del hc['disable']
  692. hc['test'] = ['NONE']
  693. for field in ['interval', 'timeout', 'start_period']:
  694. if field not in hc or isinstance(hc[field], six.integer_types):
  695. continue
  696. hc[field] = parse_nanoseconds_int(hc[field])
  697. return service_dict
  698. def finalize_service_volumes(service_dict, environment):
  699. if 'volumes' in service_dict:
  700. finalized_volumes = []
  701. normalize = environment.get_boolean('COMPOSE_CONVERT_WINDOWS_PATHS')
  702. win_host = environment.get_boolean('COMPOSE_FORCE_WINDOWS_HOST')
  703. for v in service_dict['volumes']:
  704. if isinstance(v, dict):
  705. finalized_volumes.append(MountSpec.parse(v, normalize, win_host))
  706. else:
  707. finalized_volumes.append(VolumeSpec.parse(v, normalize, win_host))
  708. duplicate_mounts = []
  709. mounts = [v.as_volume_spec() if isinstance(v, MountSpec) else v for v in finalized_volumes]
  710. for mount in mounts:
  711. if list(map(attrgetter('internal'), mounts)).count(mount.internal) > 1:
  712. duplicate_mounts.append(mount.repr())
  713. if duplicate_mounts:
  714. raise ConfigurationError("Duplicate mount points: [%s]" % (
  715. ', '.join(duplicate_mounts)))
  716. service_dict['volumes'] = finalized_volumes
  717. return service_dict
def finalize_service(service_config, service_names, version, environment, compatibility,
                     interpolate=True):
    """Apply the final round of transformations to a merged service dict.

    Resolves environment/env_file, volumes, network mode, and parses typed
    fields (secrets, configs, restart, networks). In compatibility mode,
    `deploy` and `credential_spec` keys are folded into container-level
    configuration. Returns the finalized service dict. The order of the
    steps below matters; do not reorder casually.
    """
    service_dict = dict(service_config.config)
    if 'environment' in service_dict or 'env_file' in service_dict:
        # env_file entries are folded into `environment` and then dropped.
        service_dict['environment'] = resolve_environment(service_dict, environment, interpolate)
        service_dict.pop('env_file', None)
    if 'volumes_from' in service_dict:
        service_dict['volumes_from'] = [
            VolumeFromSpec.parse(vf, service_names, version)
            for vf in service_dict['volumes_from']
        ]
    service_dict = finalize_service_volumes(service_dict, environment)
    if 'net' in service_dict:
        # Legacy v1 `net` key: translate to `network_mode`, rewriting
        # references to sibling services as 'service:<name>'.
        network_mode = service_dict.pop('net')
        container_name = get_container_name_from_network_mode(network_mode)
        if container_name and container_name in service_names:
            service_dict['network_mode'] = 'service:{}'.format(container_name)
        else:
            service_dict['network_mode'] = network_mode
    if 'networks' in service_dict:
        service_dict['networks'] = parse_networks(service_dict['networks'])
    if 'restart' in service_dict:
        service_dict['restart'] = parse_restart_spec(service_dict['restart'])
    if 'secrets' in service_dict:
        service_dict['secrets'] = [
            types.ServiceSecret.parse(s) for s in service_dict['secrets']
        ]
    if 'configs' in service_dict:
        service_dict['configs'] = [
            types.ServiceConfig.parse(c) for c in service_dict['configs']
        ]
    normalize_build(service_dict, service_config.working_dir, environment)
    if compatibility:
        # Compatibility mode: map swarm-only keys onto their container-level
        # equivalents and warn about anything that cannot be mapped.
        service_dict = translate_credential_spec_to_security_opt(service_dict)
        service_dict, ignored_keys = translate_deploy_keys_to_container_config(
            service_dict
        )
        if ignored_keys:
            log.warning(
                'The following deploy sub-keys are not supported in compatibility mode and have'
                ' been ignored: {}'.format(', '.join(ignored_keys))
            )
    service_dict['name'] = service_config.name
    return normalize_v1_service_format(service_dict)
  762. def translate_resource_keys_to_container_config(resources_dict, service_dict):
  763. if 'limits' in resources_dict:
  764. service_dict['mem_limit'] = resources_dict['limits'].get('memory')
  765. if 'cpus' in resources_dict['limits']:
  766. service_dict['cpus'] = float(resources_dict['limits']['cpus'])
  767. if 'reservations' in resources_dict:
  768. service_dict['mem_reservation'] = resources_dict['reservations'].get('memory')
  769. if 'cpus' in resources_dict['reservations']:
  770. return ['resources.reservations.cpus']
  771. return []
  772. def convert_restart_policy(name):
  773. try:
  774. return {
  775. 'any': 'always',
  776. 'none': 'no',
  777. 'on-failure': 'on-failure'
  778. }[name]
  779. except KeyError:
  780. raise ConfigurationError('Invalid restart policy "{}"'.format(name))
  781. def convert_credential_spec_to_security_opt(credential_spec):
  782. if 'file' in credential_spec:
  783. return 'file://{file}'.format(file=credential_spec['file'])
  784. return 'registry://{registry}'.format(registry=credential_spec['registry'])
  785. def translate_credential_spec_to_security_opt(service_dict):
  786. result = []
  787. if 'credential_spec' in service_dict:
  788. spec = convert_credential_spec_to_security_opt(service_dict['credential_spec'])
  789. result.append('credentialspec={spec}'.format(spec=spec))
  790. if result:
  791. service_dict['security_opt'] = result
  792. return service_dict
  793. def translate_deploy_keys_to_container_config(service_dict):
  794. if 'credential_spec' in service_dict:
  795. del service_dict['credential_spec']
  796. if 'configs' in service_dict:
  797. del service_dict['configs']
  798. if 'deploy' not in service_dict:
  799. return service_dict, []
  800. deploy_dict = service_dict['deploy']
  801. ignored_keys = [
  802. k for k in ['endpoint_mode', 'labels', 'update_config', 'rollback_config']
  803. if k in deploy_dict
  804. ]
  805. if 'replicas' in deploy_dict and deploy_dict.get('mode', 'replicated') == 'replicated':
  806. scale = deploy_dict.get('replicas', 1)
  807. max_replicas = deploy_dict.get('placement', {}).get('max_replicas_per_node', scale)
  808. service_dict['scale'] = min(scale, max_replicas)
  809. if max_replicas < scale:
  810. log.warning("Scale is limited to {} ('max_replicas_per_node' field).".format(
  811. max_replicas))
  812. if 'restart_policy' in deploy_dict:
  813. service_dict['restart'] = {
  814. 'Name': convert_restart_policy(deploy_dict['restart_policy'].get('condition', 'any')),
  815. 'MaximumRetryCount': deploy_dict['restart_policy'].get('max_attempts', 0)
  816. }
  817. for k in deploy_dict['restart_policy'].keys():
  818. if k != 'condition' and k != 'max_attempts':
  819. ignored_keys.append('restart_policy.{}'.format(k))
  820. ignored_keys.extend(
  821. translate_resource_keys_to_container_config(
  822. deploy_dict.get('resources', {}), service_dict
  823. )
  824. )
  825. del service_dict['deploy']
  826. return service_dict, ignored_keys
  827. def normalize_v1_service_format(service_dict):
  828. if 'log_driver' in service_dict or 'log_opt' in service_dict:
  829. if 'logging' not in service_dict:
  830. service_dict['logging'] = {}
  831. if 'log_driver' in service_dict:
  832. service_dict['logging']['driver'] = service_dict['log_driver']
  833. del service_dict['log_driver']
  834. if 'log_opt' in service_dict:
  835. service_dict['logging']['options'] = service_dict['log_opt']
  836. del service_dict['log_opt']
  837. if 'dockerfile' in service_dict:
  838. service_dict['build'] = service_dict.get('build', {})
  839. service_dict['build'].update({
  840. 'dockerfile': service_dict.pop('dockerfile')
  841. })
  842. return service_dict
  843. def merge_service_dicts_from_files(base, override, version):
  844. """When merging services from multiple files we need to merge the `extends`
  845. field. This is not handled by `merge_service_dicts()` which is used to
  846. perform the `extends`.
  847. """
  848. new_service = merge_service_dicts(base, override, version)
  849. if 'extends' in override:
  850. new_service['extends'] = override['extends']
  851. elif 'extends' in base:
  852. new_service['extends'] = base['extends']
  853. return new_service
  854. class MergeDict(dict):
  855. """A dict-like object responsible for merging two dicts into one."""
  856. def __init__(self, base, override):
  857. self.base = base
  858. self.override = override
  859. def needs_merge(self, field):
  860. return field in self.base or field in self.override
  861. def merge_field(self, field, merge_func, default=None):
  862. if not self.needs_merge(field):
  863. return
  864. self[field] = merge_func(
  865. self.base.get(field, default),
  866. self.override.get(field, default))
  867. def merge_mapping(self, field, parse_func=None):
  868. if not self.needs_merge(field):
  869. return
  870. if parse_func is None:
  871. def parse_func(m):
  872. return m or {}
  873. self[field] = parse_func(self.base.get(field))
  874. self[field].update(parse_func(self.override.get(field)))
  875. def merge_sequence(self, field, parse_func):
  876. def parse_sequence_func(seq):
  877. return to_mapping((parse_func(item) for item in seq), 'merge_field')
  878. if not self.needs_merge(field):
  879. return
  880. merged = parse_sequence_func(self.base.get(field, []))
  881. merged.update(parse_sequence_func(self.override.get(field, [])))
  882. self[field] = [item.repr() for item in sorted(merged.values())]
  883. def merge_scalar(self, field):
  884. if self.needs_merge(field):
  885. self[field] = self.override.get(field, self.base.get(field))
def merge_service_dicts(base, override, version):
    """Merge two service definitions; ``override`` takes precedence.

    Fields are merged with type-appropriate rules: mappings are merged
    key-by-key, sequences are deduplicated on their merge key, paths are
    keyed on container path, and anything else is treated as a scalar.
    Returns a plain dict.
    """
    md = MergeDict(base, override)
    md.merge_mapping('environment', parse_environment)
    md.merge_mapping('labels', parse_labels)
    md.merge_mapping('ulimits', parse_flat_dict)
    md.merge_mapping('sysctls', parse_sysctls)
    md.merge_mapping('depends_on', parse_depends_on)
    md.merge_mapping('storage_opt', parse_flat_dict)
    md.merge_sequence('links', ServiceLink.parse)
    md.merge_sequence('secrets', types.ServiceSecret.parse)
    md.merge_sequence('configs', types.ServiceConfig.parse)
    md.merge_sequence('security_opt', types.SecurityOpt.parse)
    md.merge_mapping('extra_hosts', parse_extra_hosts)
    md.merge_field('networks', merge_networks, default={})
    # Volumes/devices are keyed on container path so overrides replace the
    # host side of the same mount point.
    for field in ['volumes', 'devices']:
        md.merge_field(field, merge_path_mappings)
    for field in [
        'cap_add', 'cap_drop', 'expose', 'external_links',
        'volumes_from', 'device_cgroup_rules',
    ]:
        md.merge_field(field, merge_unique_items_lists, default=[])
    for field in ['dns', 'dns_search', 'env_file', 'tmpfs']:
        md.merge_field(field, merge_list_or_string)
    md.merge_field('logging', merge_logging, default={})
    merge_ports(md, base, override)
    md.merge_field('blkio_config', merge_blkio_config, default={})
    md.merge_field('healthcheck', merge_healthchecks, default={})
    md.merge_field('deploy', merge_deploy, default={})
    # Any remaining allowed key without a dedicated rule is a scalar:
    # the override value simply replaces the base value.
    for field in set(ALLOWED_KEYS) - set(md):
        md.merge_scalar(field)
    if version == V1:
        # V1 files: `image` and `build` are mutually exclusive.
        legacy_v1_merge_image_or_build(md, base, override)
    elif md.needs_merge('build'):
        md['build'] = merge_build(md, base, override)
    return dict(md)
  921. def merge_unique_items_lists(base, override):
  922. override = [str(o) for o in override]
  923. base = [str(b) for b in base]
  924. return sorted(set().union(base, override))
  925. def merge_healthchecks(base, override):
  926. if override.get('disabled') is True:
  927. return override
  928. result = base.copy()
  929. result.update(override)
  930. return result
  931. def merge_ports(md, base, override):
  932. def parse_sequence_func(seq):
  933. acc = []
  934. for item in seq:
  935. acc.extend(ServicePort.parse(item))
  936. return to_mapping(acc, 'merge_field')
  937. field = 'ports'
  938. if not md.needs_merge(field):
  939. return
  940. merged = parse_sequence_func(md.base.get(field, []))
  941. merged.update(parse_sequence_func(md.override.get(field, [])))
  942. md[field] = [item for item in sorted(merged.values(), key=lambda x: x.target)]
  943. def merge_build(output, base, override):
  944. def to_dict(service):
  945. build_config = service.get('build', {})
  946. if isinstance(build_config, six.string_types):
  947. return {'context': build_config}
  948. return build_config
  949. md = MergeDict(to_dict(base), to_dict(override))
  950. md.merge_scalar('context')
  951. md.merge_scalar('dockerfile')
  952. md.merge_scalar('network')
  953. md.merge_scalar('target')
  954. md.merge_scalar('shm_size')
  955. md.merge_scalar('isolation')
  956. md.merge_mapping('args', parse_build_arguments)
  957. md.merge_field('cache_from', merge_unique_items_lists, default=[])
  958. md.merge_mapping('labels', parse_labels)
  959. md.merge_mapping('extra_hosts', parse_extra_hosts)
  960. return dict(md)
  961. def merge_deploy(base, override):
  962. md = MergeDict(base or {}, override or {})
  963. md.merge_scalar('mode')
  964. md.merge_scalar('endpoint_mode')
  965. md.merge_scalar('replicas')
  966. md.merge_mapping('labels', parse_labels)
  967. md.merge_mapping('update_config')
  968. md.merge_mapping('rollback_config')
  969. md.merge_mapping('restart_policy')
  970. if md.needs_merge('resources'):
  971. resources_md = MergeDict(md.base.get('resources') or {}, md.override.get('resources') or {})
  972. resources_md.merge_mapping('limits')
  973. resources_md.merge_field('reservations', merge_reservations, default={})
  974. md['resources'] = dict(resources_md)
  975. if md.needs_merge('placement'):
  976. placement_md = MergeDict(md.base.get('placement') or {}, md.override.get('placement') or {})
  977. placement_md.merge_field('constraints', merge_unique_items_lists, default=[])
  978. placement_md.merge_field('preferences', merge_unique_objects_lists, default=[])
  979. md['placement'] = dict(placement_md)
  980. return dict(md)
  981. def merge_networks(base, override):
  982. merged_networks = {}
  983. all_network_names = set(base) | set(override)
  984. base = {k: {} for k in base} if isinstance(base, list) else base
  985. override = {k: {} for k in override} if isinstance(override, list) else override
  986. for network_name in all_network_names:
  987. md = MergeDict(base.get(network_name) or {}, override.get(network_name) or {})
  988. md.merge_field('aliases', merge_unique_items_lists, [])
  989. md.merge_field('link_local_ips', merge_unique_items_lists, [])
  990. md.merge_scalar('priority')
  991. md.merge_scalar('ipv4_address')
  992. md.merge_scalar('ipv6_address')
  993. merged_networks[network_name] = dict(md)
  994. return merged_networks
  995. def merge_reservations(base, override):
  996. md = MergeDict(base, override)
  997. md.merge_scalar('cpus')
  998. md.merge_scalar('memory')
  999. md.merge_sequence('generic_resources', types.GenericResource.parse)
  1000. return dict(md)
  1001. def merge_unique_objects_lists(base, override):
  1002. result = dict((json_hash(i), i) for i in base + override)
  1003. return [i[1] for i in sorted([(k, v) for k, v in result.items()], key=lambda x: x[0])]
  1004. def merge_blkio_config(base, override):
  1005. md = MergeDict(base, override)
  1006. md.merge_scalar('weight')
  1007. def merge_blkio_limits(base, override):
  1008. index = dict((b['path'], b) for b in base)
  1009. for o in override:
  1010. index[o['path']] = o
  1011. return sorted(list(index.values()), key=lambda x: x['path'])
  1012. for field in [
  1013. "device_read_bps", "device_read_iops", "device_write_bps",
  1014. "device_write_iops", "weight_device",
  1015. ]:
  1016. md.merge_field(field, merge_blkio_limits, default=[])
  1017. return dict(md)
  1018. def merge_logging(base, override):
  1019. md = MergeDict(base, override)
  1020. md.merge_scalar('driver')
  1021. if md.get('driver') == base.get('driver') or base.get('driver') is None:
  1022. md.merge_mapping('options', lambda m: m or {})
  1023. elif override.get('options'):
  1024. md['options'] = override.get('options', {})
  1025. return dict(md)
  1026. def legacy_v1_merge_image_or_build(output, base, override):
  1027. output.pop('image', None)
  1028. output.pop('build', None)
  1029. if 'image' in override:
  1030. output['image'] = override['image']
  1031. elif 'build' in override:
  1032. output['build'] = override['build']
  1033. elif 'image' in base:
  1034. output['image'] = base['image']
  1035. elif 'build' in base:
  1036. output['build'] = base['build']
  1037. def merge_environment(base, override):
  1038. env = parse_environment(base)
  1039. env.update(parse_environment(override))
  1040. return env
  1041. def merge_labels(base, override):
  1042. labels = parse_labels(base)
  1043. labels.update(parse_labels(override))
  1044. return labels
  1045. def split_kv(kvpair):
  1046. if '=' in kvpair:
  1047. return kvpair.split('=', 1)
  1048. else:
  1049. return kvpair, ''
  1050. def parse_dict_or_list(split_func, type_name, arguments):
  1051. if not arguments:
  1052. return {}
  1053. if isinstance(arguments, list):
  1054. return dict(split_func(e) for e in arguments)
  1055. if isinstance(arguments, dict):
  1056. return dict(arguments)
  1057. raise ConfigurationError(
  1058. "%s \"%s\" must be a list or mapping," %
  1059. (type_name, arguments)
  1060. )
# Pre-bound parsers for config fields that accept either a list of
# "KEY[=VALUE]" strings or a mapping (see parse_dict_or_list).
parse_build_arguments = functools.partial(parse_dict_or_list, split_env, 'build arguments')
parse_environment = functools.partial(parse_dict_or_list, split_env, 'environment')
# Labels and sysctls permit a bare KEY; the value then defaults to ''.
parse_labels = functools.partial(parse_dict_or_list, split_kv, 'labels')
# A bare network name maps to None (no network-specific options).
parse_networks = functools.partial(parse_dict_or_list, lambda k: (k, None), 'networks')
parse_sysctls = functools.partial(parse_dict_or_list, split_kv, 'sysctls')
# A bare depends_on name gets the default 'service_started' condition.
parse_depends_on = functools.partial(
    parse_dict_or_list, lambda k: (k, {'condition': 'service_started'}), 'depends_on'
)
  1069. def parse_flat_dict(d):
  1070. if not d:
  1071. return {}
  1072. if isinstance(d, dict):
  1073. return dict(d)
  1074. raise ConfigurationError("Invalid type: expected mapping")
  1075. def resolve_env_var(key, val, environment):
  1076. if val is not None:
  1077. return key, val
  1078. elif environment and key in environment:
  1079. return key, environment[key]
  1080. else:
  1081. return key, None
  1082. def resolve_volume_paths(working_dir, service_dict):
  1083. return [
  1084. resolve_volume_path(working_dir, volume)
  1085. for volume in service_dict['volumes']
  1086. ]
  1087. def resolve_volume_path(working_dir, volume):
  1088. if isinstance(volume, dict):
  1089. if volume.get('source', '').startswith(('.', '~')) and volume['type'] == 'bind':
  1090. volume['source'] = expand_path(working_dir, volume['source'])
  1091. return volume
  1092. mount_params = None
  1093. container_path, mount_params = split_path_mapping(volume)
  1094. if mount_params is not None:
  1095. host_path, mode = mount_params
  1096. if host_path is None:
  1097. return container_path
  1098. if host_path.startswith('.'):
  1099. host_path = expand_path(working_dir, host_path)
  1100. host_path = os.path.expanduser(host_path)
  1101. return u"{}:{}{}".format(host_path, container_path, (':' + mode if mode else ''))
  1102. return container_path
  1103. def normalize_build(service_dict, working_dir, environment):
  1104. if 'build' in service_dict:
  1105. build = {}
  1106. # Shortcut where specifying a string is treated as the build context
  1107. if isinstance(service_dict['build'], six.string_types):
  1108. build['context'] = service_dict.pop('build')
  1109. else:
  1110. build.update(service_dict['build'])
  1111. if 'args' in build:
  1112. build['args'] = build_string_dict(
  1113. resolve_build_args(build.get('args'), environment)
  1114. )
  1115. service_dict['build'] = build
  1116. def resolve_build_path(working_dir, build_path):
  1117. if is_url(build_path):
  1118. return build_path
  1119. return expand_path(working_dir, build_path)
def is_url(build_path):
    # True when the build context is a remote URL (one of the prefixes the
    # Docker engine accepts) rather than a local filesystem path.
    return build_path.startswith(DOCKER_VALID_URL_PREFIXES)
  1122. def validate_paths(service_dict):
  1123. if 'build' in service_dict:
  1124. build = service_dict.get('build', {})
  1125. if isinstance(build, six.string_types):
  1126. build_path = build
  1127. elif isinstance(build, dict) and 'context' in build:
  1128. build_path = build['context']
  1129. else:
  1130. # We have a build section but no context, so nothing to validate
  1131. return
  1132. if (
  1133. not is_url(build_path) and
  1134. (not os.path.exists(build_path) or not os.access(build_path, os.R_OK))
  1135. ):
  1136. raise ConfigurationError(
  1137. "build path %s either does not exist, is not accessible, "
  1138. "or is not a valid URL." % build_path)
  1139. def merge_path_mappings(base, override):
  1140. d = dict_from_path_mappings(base)
  1141. d.update(dict_from_path_mappings(override))
  1142. return path_mappings_from_dict(d)
  1143. def dict_from_path_mappings(path_mappings):
  1144. if path_mappings:
  1145. return dict(split_path_mapping(v) for v in path_mappings)
  1146. else:
  1147. return {}
  1148. def path_mappings_from_dict(d):
  1149. return [join_path_mapping(v) for v in sorted(d.items())]
  1150. def split_path_mapping(volume_path):
  1151. """
  1152. Ascertain if the volume_path contains a host path as well as a container
  1153. path. Using splitdrive so windows absolute paths won't cause issues with
  1154. splitting on ':'.
  1155. """
  1156. if isinstance(volume_path, dict):
  1157. return (volume_path.get('target'), volume_path)
  1158. drive, volume_config = splitdrive(volume_path)
  1159. if ':' in volume_config:
  1160. (host, container) = volume_config.split(':', 1)
  1161. container_drive, container_path = splitdrive(container)
  1162. mode = None
  1163. if ':' in container_path:
  1164. container_path, mode = container_path.rsplit(':', 1)
  1165. return (container_drive + container_path, (drive + host, mode))
  1166. else:
  1167. return (volume_path, None)
  1168. def process_security_opt(service_dict):
  1169. security_opts = service_dict.get('security_opt', [])
  1170. result = []
  1171. for value in security_opts:
  1172. result.append(SecurityOpt.parse(value))
  1173. if result:
  1174. service_dict['security_opt'] = result
  1175. return service_dict
  1176. def join_path_mapping(pair):
  1177. (container, host) = pair
  1178. if isinstance(host, dict):
  1179. return host
  1180. elif host is None:
  1181. return container
  1182. else:
  1183. host, mode = host
  1184. result = ":".join((host, container))
  1185. if mode:
  1186. result += ":" + mode
  1187. return result
  1188. def expand_path(working_dir, path):
  1189. return os.path.abspath(os.path.join(working_dir, os.path.expanduser(path)))
  1190. def merge_list_or_string(base, override):
  1191. return to_list(base) + to_list(override)
  1192. def to_list(value):
  1193. if value is None:
  1194. return []
  1195. elif isinstance(value, six.string_types):
  1196. return [value]
  1197. else:
  1198. return value
  1199. def to_mapping(sequence, key_field):
  1200. return {getattr(item, key_field): item for item in sequence}
  1201. def has_uppercase(name):
  1202. return any(char in string.ascii_uppercase for char in name)
  1203. def load_yaml(filename, encoding=None, binary=True):
  1204. try:
  1205. with io.open(filename, 'rb' if binary else 'r', encoding=encoding) as fh:
  1206. return yaml.safe_load(fh)
  1207. except (IOError, yaml.YAMLError, UnicodeDecodeError) as e:
  1208. if encoding is None:
  1209. # Sometimes the user's locale sets an encoding that doesn't match
  1210. # the YAML files. Im such cases, retry once with the "default"
  1211. # UTF-8 encoding
  1212. return load_yaml(filename, encoding='utf-8-sig', binary=False)
  1213. error_name = getattr(e, '__module__', '') + '.' + e.__class__.__name__
  1214. raise ConfigurationError(u"{}: {}".format(error_name, e))