# service_test.py

import os
import re
import shutil
import tempfile
from distutils.spawn import find_executable
from io import StringIO
from os import path

import pytest
from docker.errors import APIError
from docker.errors import ImageNotFound
from six import text_type

from .. import mock
from ..helpers import BUSYBOX_IMAGE_WITH_TAG
from .testcases import docker_client
from .testcases import DockerClientTestCase
from .testcases import get_links
from .testcases import pull_busybox
from .testcases import SWARM_SKIP_CONTAINERS_ALL
from .testcases import SWARM_SKIP_CPU_SHARES
from compose import __version__
from compose.config.types import MountSpec
from compose.config.types import SecurityOpt
from compose.config.types import VolumeFromSpec
from compose.config.types import VolumeSpec
from compose.const import IS_WINDOWS_PLATFORM
from compose.const import LABEL_CONFIG_HASH
from compose.const import LABEL_CONTAINER_NUMBER
from compose.const import LABEL_ONE_OFF
from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE
from compose.const import LABEL_VERSION
from compose.container import Container
from compose.errors import OperationFailedError
from compose.parallel import ParallelStreamWriter
from compose.project import OneOffFilter
from compose.project import Project
from compose.service import BuildAction
from compose.service import ConvergencePlan
from compose.service import ConvergenceStrategy
from compose.service import NetworkMode
from compose.service import PidMode
from compose.service import Service
from compose.utils import parse_nanoseconds_int
from tests.helpers import create_custom_host_file
from tests.integration.testcases import is_cluster
from tests.integration.testcases import no_cluster
from tests.integration.testcases import v2_1_only
from tests.integration.testcases import v2_2_only
from tests.integration.testcases import v2_3_only
from tests.integration.testcases import v2_only
from tests.integration.testcases import v3_only


def create_and_start_container(service, **override_options):
    container = service.create_container(**override_options)
    return service.start_container(container)


class ServiceTest(DockerClientTestCase):

    def test_containers(self):
        foo = self.create_service('foo')
        bar = self.create_service('bar')
        create_and_start_container(foo)
        assert len(foo.containers()) == 1
        assert foo.containers()[0].name.startswith('composetest_foo_')
        assert len(bar.containers()) == 0
        create_and_start_container(bar)
        create_and_start_container(bar)
        assert len(foo.containers()) == 1
        assert len(bar.containers()) == 2
        names = [c.name for c in bar.containers()]
        assert len(names) == 2
        assert all(name.startswith('composetest_bar_') for name in names)

    def test_containers_one_off(self):
        db = self.create_service('db')
        container = db.create_container(one_off=True)
        assert db.containers(stopped=True) == []
        assert db.containers(one_off=OneOffFilter.only, stopped=True) == [container]

    def test_project_is_added_to_container_name(self):
        service = self.create_service('web')
        create_and_start_container(service)
        assert service.containers()[0].name.startswith('composetest_web_')

    def test_create_container_with_one_off(self):
        db = self.create_service('db')
        container = db.create_container(one_off=True)
        assert container.name.startswith('composetest_db_run_')

    def test_create_container_with_one_off_when_existing_container_is_running(self):
        db = self.create_service('db')
        db.start()
        container = db.create_container(one_off=True)
        assert container.name.startswith('composetest_db_run_')

    def test_create_container_with_unspecified_volume(self):
        service = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
        container = service.create_container()
        service.start_container(container)
        assert container.get_mount('/var/db')

    def test_create_container_with_volume_driver(self):
        service = self.create_service('db', volume_driver='foodriver')
        container = service.create_container()
        service.start_container(container)
        assert 'foodriver' == container.get('HostConfig.VolumeDriver')

    @pytest.mark.skipif(SWARM_SKIP_CPU_SHARES, reason='Swarm --cpu-shares bug')
    def test_create_container_with_cpu_shares(self):
        service = self.create_service('db', cpu_shares=73)
        container = service.create_container()
        service.start_container(container)
        assert container.get('HostConfig.CpuShares') == 73

    def test_create_container_with_cpu_quota(self):
        service = self.create_service('db', cpu_quota=40000, cpu_period=150000)
        container = service.create_container()
        container.start()
        assert container.get('HostConfig.CpuQuota') == 40000
        assert container.get('HostConfig.CpuPeriod') == 150000

    @pytest.mark.xfail(raises=OperationFailedError, reason='not supported by kernel')
    def test_create_container_with_cpu_rt(self):
        service = self.create_service('db', cpu_rt_runtime=40000, cpu_rt_period=150000)
        container = service.create_container()
        container.start()
        assert container.get('HostConfig.CpuRealtimeRuntime') == 40000
        assert container.get('HostConfig.CpuRealtimePeriod') == 150000

    @v2_2_only()
    def test_create_container_with_cpu_count(self):
        self.require_api_version('1.25')
        service = self.create_service('db', cpu_count=2)
        container = service.create_container()
        service.start_container(container)
        assert container.get('HostConfig.CpuCount') == 2

    @v2_2_only()
    @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='cpu_percent is not supported for Linux')
    def test_create_container_with_cpu_percent(self):
        self.require_api_version('1.25')
        service = self.create_service('db', cpu_percent=12)
        container = service.create_container()
        service.start_container(container)
        assert container.get('HostConfig.CpuPercent') == 12

    @v2_2_only()
    def test_create_container_with_cpus(self):
        self.require_api_version('1.25')
        service = self.create_service('db', cpus=1)
        container = service.create_container()
        service.start_container(container)
        assert container.get('HostConfig.NanoCpus') == 1000000000

    def test_create_container_with_shm_size(self):
        self.require_api_version('1.22')
        service = self.create_service('db', shm_size=67108864)
        container = service.create_container()
        service.start_container(container)
        assert container.get('HostConfig.ShmSize') == 67108864

    def test_create_container_with_init_bool(self):
        self.require_api_version('1.25')
        service = self.create_service('db', init=True)
        container = service.create_container()
        service.start_container(container)
        assert container.get('HostConfig.Init') is True

    @pytest.mark.xfail(True, reason='Option has been removed in Engine 17.06.0')
    def test_create_container_with_init_path(self):
        self.require_api_version('1.25')
        docker_init_path = find_executable('docker-init')
        service = self.create_service('db', init=docker_init_path)
        container = service.create_container()
        service.start_container(container)
        assert container.get('HostConfig.InitPath') == docker_init_path

    @pytest.mark.xfail(True, reason='Some kernels/configs do not support pids_limit')
    def test_create_container_with_pids_limit(self):
        self.require_api_version('1.23')
        service = self.create_service('db', pids_limit=10)
        container = service.create_container()
        service.start_container(container)
        assert container.get('HostConfig.PidsLimit') == 10

    def test_create_container_with_extra_hosts_list(self):
        extra_hosts = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
        service = self.create_service('db', extra_hosts=extra_hosts)
        container = service.create_container()
        service.start_container(container)
        assert set(container.get('HostConfig.ExtraHosts')) == set(extra_hosts)

    def test_create_container_with_extra_hosts_dicts(self):
        extra_hosts = {'somehost': '162.242.195.82', 'otherhost': '50.31.209.229'}
        extra_hosts_list = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
        service = self.create_service('db', extra_hosts=extra_hosts)
        container = service.create_container()
        service.start_container(container)
        assert set(container.get('HostConfig.ExtraHosts')) == set(extra_hosts_list)

    def test_create_container_with_cpu_set(self):
        service = self.create_service('db', cpuset='0')
        container = service.create_container()
        service.start_container(container)
        assert container.get('HostConfig.CpusetCpus') == '0'

    def test_create_container_with_read_only_root_fs(self):
        read_only = True
        service = self.create_service('db', read_only=read_only)
        container = service.create_container()
        service.start_container(container)
        assert container.get('HostConfig.ReadonlyRootfs') == read_only

    def test_create_container_with_blkio_config(self):
        blkio_config = {
            'weight': 300,
            'weight_device': [{'path': '/dev/sda', 'weight': 200}],
            'device_read_bps': [{'path': '/dev/sda', 'rate': 1024 * 1024 * 100}],
            'device_read_iops': [{'path': '/dev/sda', 'rate': 1000}],
            'device_write_bps': [{'path': '/dev/sda', 'rate': 1024 * 1024}],
            'device_write_iops': [{'path': '/dev/sda', 'rate': 800}]
        }
        service = self.create_service('web', blkio_config=blkio_config)
        container = service.create_container()
        assert container.get('HostConfig.BlkioWeight') == 300
        assert container.get('HostConfig.BlkioWeightDevice') == [{
            'Path': '/dev/sda', 'Weight': 200
        }]
        assert container.get('HostConfig.BlkioDeviceReadBps') == [{
            'Path': '/dev/sda', 'Rate': 1024 * 1024 * 100
        }]
        assert container.get('HostConfig.BlkioDeviceWriteBps') == [{
            'Path': '/dev/sda', 'Rate': 1024 * 1024
        }]
        assert container.get('HostConfig.BlkioDeviceReadIOps') == [{
            'Path': '/dev/sda', 'Rate': 1000
        }]
        assert container.get('HostConfig.BlkioDeviceWriteIOps') == [{
            'Path': '/dev/sda', 'Rate': 800
        }]

    def test_create_container_with_security_opt(self):
        security_opt = [SecurityOpt.parse('label:disable')]
        service = self.create_service('db', security_opt=security_opt)
        container = service.create_container()
        service.start_container(container)
        assert set(container.get('HostConfig.SecurityOpt')) == set([o.repr() for o in security_opt])

    @pytest.mark.xfail(True, reason='Not supported on most drivers')
    def test_create_container_with_storage_opt(self):
        storage_opt = {'size': '1G'}
        service = self.create_service('db', storage_opt=storage_opt)
        container = service.create_container()
        service.start_container(container)
        assert container.get('HostConfig.StorageOpt') == storage_opt

    def test_create_container_with_oom_kill_disable(self):
        self.require_api_version('1.20')
        service = self.create_service('db', oom_kill_disable=True)
        container = service.create_container()
        assert container.get('HostConfig.OomKillDisable') is True

    def test_create_container_with_mac_address(self):
        service = self.create_service('db', mac_address='02:42:ac:11:65:43')
        container = service.create_container()
        service.start_container(container)
        assert container.inspect()['Config']['MacAddress'] == '02:42:ac:11:65:43'

    def test_create_container_with_device_cgroup_rules(self):
        service = self.create_service('db', device_cgroup_rules=['c 7:128 rwm'])
        container = service.create_container()
        assert container.get('HostConfig.DeviceCgroupRules') == ['c 7:128 rwm']

    def test_create_container_with_specified_volume(self):
        host_path = '/tmp/host-path'
        container_path = '/container-path'
        service = self.create_service(
            'db',
            volumes=[VolumeSpec(host_path, container_path, 'rw')])
        container = service.create_container()
        service.start_container(container)
        assert container.get_mount(container_path)

        # Match the last component ("host-path"), because boot2docker symlinks /tmp
        actual_host_path = container.get_mount(container_path)['Source']
        assert path.basename(actual_host_path) == path.basename(host_path), (
            "Last component differs: %s, %s" % (actual_host_path, host_path)
        )

    @v2_3_only()
    def test_create_container_with_host_mount(self):
        host_path = '/tmp/host-path'
        container_path = '/container-path'
        create_custom_host_file(self.client, path.join(host_path, 'a.txt'), 'test')
        service = self.create_service(
            'db',
            volumes=[
                MountSpec(type='bind', source=host_path, target=container_path, read_only=True)
            ]
        )
        container = service.create_container()
        service.start_container(container)
        mount = container.get_mount(container_path)
        assert mount
        assert path.basename(mount['Source']) == path.basename(host_path)
        assert mount['RW'] is False

    @v2_3_only()
    def test_create_container_with_tmpfs_mount(self):
        container_path = '/container-tmpfs'
        service = self.create_service(
            'db',
            volumes=[MountSpec(type='tmpfs', target=container_path)]
        )
        container = service.create_container()
        service.start_container(container)
        mount = container.get_mount(container_path)
        assert mount
        assert mount['Type'] == 'tmpfs'

    @v2_3_only()
    def test_create_container_with_tmpfs_mount_tmpfs_size(self):
        container_path = '/container-tmpfs'
        service = self.create_service(
            'db',
            volumes=[MountSpec(type='tmpfs', target=container_path, tmpfs={'size': 5368709})]
        )
        container = service.create_container()
        service.start_container(container)
        mount = container.get_mount(container_path)
        assert mount
        print(container.dictionary)
        assert mount['Type'] == 'tmpfs'
        assert container.get('HostConfig.Mounts')[0]['TmpfsOptions'] == {
            'SizeBytes': 5368709
        }

    @v2_3_only()
    def test_create_container_with_volume_mount(self):
        container_path = '/container-volume'
        volume_name = 'composetest_abcde'
        self.client.create_volume(volume_name)
        service = self.create_service(
            'db',
            volumes=[MountSpec(type='volume', source=volume_name, target=container_path)]
        )
        container = service.create_container()
        service.start_container(container)
        mount = container.get_mount(container_path)
        assert mount
        assert mount['Name'] == volume_name

    @v3_only()
    def test_create_container_with_legacy_mount(self):
        # Ensure mounts are converted to volumes if API version < 1.30
        # Needed to support long syntax in the 3.2 format
        client = docker_client({}, version='1.25')
        container_path = '/container-volume'
        volume_name = 'composetest_abcde'
        self.client.create_volume(volume_name)
        service = Service('db', client=client, volumes=[
            MountSpec(type='volume', source=volume_name, target=container_path)
        ], image=BUSYBOX_IMAGE_WITH_TAG, command=['top'], project='composetest')
        container = service.create_container()
        service.start_container(container)
        mount = container.get_mount(container_path)
        assert mount
        assert mount['Name'] == volume_name

    @v3_only()
    def test_create_container_with_legacy_tmpfs_mount(self):
        # Ensure tmpfs mounts are converted to tmpfs entries if API version < 1.30
        # Needed to support long syntax in the 3.2 format
        client = docker_client({}, version='1.25')
        container_path = '/container-tmpfs'
        service = Service('db', client=client, volumes=[
            MountSpec(type='tmpfs', target=container_path)
        ], image=BUSYBOX_IMAGE_WITH_TAG, command=['top'], project='composetest')
        container = service.create_container()
        service.start_container(container)
        mount = container.get_mount(container_path)
        assert mount is None
        assert container_path in container.get('HostConfig.Tmpfs')

    def test_create_container_with_healthcheck_config(self):
        one_second = parse_nanoseconds_int('1s')
        healthcheck = {
            'test': ['true'],
            'interval': 2 * one_second,
            'timeout': 5 * one_second,
            'retries': 5,
            'start_period': 2 * one_second
        }
        service = self.create_service('db', healthcheck=healthcheck)
        container = service.create_container()
        remote_healthcheck = container.get('Config.Healthcheck')
        assert remote_healthcheck['Test'] == healthcheck['test']
        assert remote_healthcheck['Interval'] == healthcheck['interval']
        assert remote_healthcheck['Timeout'] == healthcheck['timeout']
        assert remote_healthcheck['Retries'] == healthcheck['retries']
        assert remote_healthcheck['StartPeriod'] == healthcheck['start_period']

    def test_recreate_preserves_volume_with_trailing_slash(self):
        """When the Compose file specifies a trailing slash in the container path, make
        sure we copy the volume over when recreating.
        """
        service = self.create_service('data', volumes=[VolumeSpec.parse('/data/')])
        old_container = create_and_start_container(service)
        volume_path = old_container.get_mount('/data')['Source']
        new_container = service.recreate_container(old_container)
        assert new_container.get_mount('/data')['Source'] == volume_path

    def test_recreate_volume_to_mount(self):
        # https://github.com/docker/compose/issues/6280
        service = Service(
            project='composetest',
            name='db',
            client=self.client,
            build={'context': 'tests/fixtures/dockerfile-with-volume'},
            volumes=[MountSpec.parse({
                'type': 'volume',
                'target': '/data',
            })]
        )
        old_container = create_and_start_container(service)
        new_container = service.recreate_container(old_container)
        assert new_container.get_mount('/data')['Source']

    def test_duplicate_volume_trailing_slash(self):
        """
        When an image specifies a volume, and the Compose file specifies a host path
        but adds a trailing slash, make sure that we don't create duplicate binds.
        """
        host_path = '/tmp/data'
        container_path = '/data'
        volumes = [VolumeSpec.parse('{}:{}/'.format(host_path, container_path))]
        tmp_container = self.client.create_container(
            'busybox', 'true',
            volumes={container_path: {}},
            labels={'com.docker.compose.test_image': 'true'},
            host_config={}
        )
        image = self.client.commit(tmp_container)['Id']
        service = self.create_service('db', image=image, volumes=volumes)
        old_container = create_and_start_container(service)
        assert old_container.get('Config.Volumes') == {container_path: {}}
        service = self.create_service('db', image=image, volumes=volumes)
        new_container = service.recreate_container(old_container)
        assert new_container.get('Config.Volumes') == {container_path: {}}
        assert service.containers(stopped=False) == [new_container]

    def test_create_container_with_volumes_from(self):
        volume_service = self.create_service('data')
        volume_container_1 = volume_service.create_container()
        volume_container_2 = Container.create(
            self.client,
            image=BUSYBOX_IMAGE_WITH_TAG,
            command=["top"],
            labels={LABEL_PROJECT: 'composetest'},
            host_config={},
            environment=['affinity:container=={}'.format(volume_container_1.id)],
        )
        host_service = self.create_service(
            'host',
            volumes_from=[
                VolumeFromSpec(volume_service, 'rw', 'service'),
                VolumeFromSpec(volume_container_2, 'rw', 'container')
            ],
            environment=['affinity:container=={}'.format(volume_container_1.id)],
        )
        host_container = host_service.create_container()
        host_service.start_container(host_container)
        assert volume_container_1.id + ':rw' in host_container.get('HostConfig.VolumesFrom')
        assert volume_container_2.id + ':rw' in host_container.get('HostConfig.VolumesFrom')

    def test_execute_convergence_plan_recreate(self):
        service = self.create_service(
            'db',
            environment={'FOO': '1'},
            volumes=[VolumeSpec.parse('/etc')],
            entrypoint=['top'],
            command=['-d', '1']
        )
        old_container = service.create_container()
        assert old_container.get('Config.Entrypoint') == ['top']
        assert old_container.get('Config.Cmd') == ['-d', '1']
        assert 'FOO=1' in old_container.get('Config.Env')
        assert old_container.name.startswith('composetest_db_')
        service.start_container(old_container)
        old_container.inspect()  # reload volume data
        volume_path = old_container.get_mount('/etc')['Source']

        num_containers_before = len(self.client.containers(all=True))

        service.options['environment']['FOO'] = '2'
        new_container, = service.execute_convergence_plan(
            ConvergencePlan('recreate', [old_container]))

        assert new_container.get('Config.Entrypoint') == ['top']
        assert new_container.get('Config.Cmd') == ['-d', '1']
        assert 'FOO=2' in new_container.get('Config.Env')
        assert new_container.name.startswith('composetest_db_')
        assert new_container.get_mount('/etc')['Source'] == volume_path
        if not is_cluster(self.client):
            assert (
                'affinity:container==%s' % old_container.id in
                new_container.get('Config.Env')
            )
        else:
            # In Swarm, the env marker is consumed and the container should be deployed
            # on the same node.
            assert old_container.get('Node.Name') == new_container.get('Node.Name')

        assert len(self.client.containers(all=True)) == num_containers_before
        assert old_container.id != new_container.id
        with pytest.raises(APIError):
            self.client.inspect_container(old_container.id)

    def test_execute_convergence_plan_recreate_change_mount_target(self):
        service = self.create_service(
            'db',
            volumes=[MountSpec(target='/app1', type='volume')],
            entrypoint=['top'], command=['-d', '1']
        )
        old_container = create_and_start_container(service)
        assert (
            [mount['Destination'] for mount in old_container.get('Mounts')] ==
            ['/app1']
        )
        service.options['volumes'] = [MountSpec(target='/app2', type='volume')]
        new_container, = service.execute_convergence_plan(
            ConvergencePlan('recreate', [old_container])
        )
        assert (
            [mount['Destination'] for mount in new_container.get('Mounts')] ==
            ['/app2']
        )

    def test_execute_convergence_plan_recreate_twice(self):
        service = self.create_service(
            'db',
            volumes=[VolumeSpec.parse('/etc')],
            entrypoint=['top'],
            command=['-d', '1'])
        orig_container = service.create_container()
        service.start_container(orig_container)
        orig_container.inspect()  # reload volume data
        volume_path = orig_container.get_mount('/etc')['Source']

        # Do this twice to reproduce the bug
        for _ in range(2):
            new_container, = service.execute_convergence_plan(
                ConvergencePlan('recreate', [orig_container]))
            assert new_container.get_mount('/etc')['Source'] == volume_path
            if not is_cluster(self.client):
                assert ('affinity:container==%s' % orig_container.id in
                        new_container.get('Config.Env'))
            else:
                # In Swarm, the env marker is consumed and the container should be deployed
                # on the same node.
                assert orig_container.get('Node.Name') == new_container.get('Node.Name')
            orig_container = new_container

    @v2_3_only()
    def test_execute_convergence_plan_recreate_twice_with_mount(self):
        service = self.create_service(
            'db',
            volumes=[MountSpec(target='/etc', type='volume')],
            entrypoint=['top'],
            command=['-d', '1']
        )
        orig_container = service.create_container()
        service.start_container(orig_container)
        orig_container.inspect()  # reload volume data
        volume_path = orig_container.get_mount('/etc')['Source']

        # Do this twice to reproduce the bug
        for _ in range(2):
            new_container, = service.execute_convergence_plan(
                ConvergencePlan('recreate', [orig_container])
            )
            assert new_container.get_mount('/etc')['Source'] == volume_path
            if not is_cluster(self.client):
                assert ('affinity:container==%s' % orig_container.id in
                        new_container.get('Config.Env'))
            else:
                # In Swarm, the env marker is consumed and the container should be deployed
                # on the same node.
                assert orig_container.get('Node.Name') == new_container.get('Node.Name')
            orig_container = new_container

    def test_execute_convergence_plan_when_containers_are_stopped(self):
        service = self.create_service(
            'db',
            environment={'FOO': '1'},
            volumes=[VolumeSpec.parse('/var/db')],
            entrypoint=['top'],
            command=['-d', '1']
        )
        service.create_container()

        containers = service.containers(stopped=True)
        assert len(containers) == 1
        container, = containers
        assert not container.is_running
        service.execute_convergence_plan(ConvergencePlan('start', [container]))

        containers = service.containers()
        assert len(containers) == 1
        container.inspect()
        assert container == containers[0]
        assert container.is_running

    def test_execute_convergence_plan_with_image_declared_volume(self):
        service = Service(
            project='composetest',
            name='db',
            client=self.client,
            build={'context': 'tests/fixtures/dockerfile-with-volume'},
        )
        old_container = create_and_start_container(service)
        assert [mount['Destination'] for mount in old_container.get('Mounts')] == ['/data']
        volume_path = old_container.get_mount('/data')['Source']

        new_container, = service.execute_convergence_plan(
            ConvergencePlan('recreate', [old_container]))

        assert [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data']
        assert new_container.get_mount('/data')['Source'] == volume_path

    def test_execute_convergence_plan_with_image_declared_volume_renew(self):
        service = Service(
            project='composetest',
            name='db',
            client=self.client,
            build={'context': 'tests/fixtures/dockerfile-with-volume'},
        )
        old_container = create_and_start_container(service)
        assert [mount['Destination'] for mount in old_container.get('Mounts')] == ['/data']
        volume_path = old_container.get_mount('/data')['Source']

        new_container, = service.execute_convergence_plan(
            ConvergencePlan('recreate', [old_container]), renew_anonymous_volumes=True
        )

        assert [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data']
        assert new_container.get_mount('/data')['Source'] != volume_path

    def test_execute_convergence_plan_when_image_volume_masks_config(self):
        service = self.create_service(
            'db',
            build={'context': 'tests/fixtures/dockerfile-with-volume'},
        )
        old_container = create_and_start_container(service)
        assert [mount['Destination'] for mount in old_container.get('Mounts')] == ['/data']
        volume_path = old_container.get_mount('/data')['Source']

        service.options['volumes'] = [VolumeSpec.parse('/tmp:/data')]

        with mock.patch('compose.service.log') as mock_log:
            new_container, = service.execute_convergence_plan(
                ConvergencePlan('recreate', [old_container]))

        mock_log.warning.assert_called_once_with(mock.ANY)
        _, args, kwargs = mock_log.warning.mock_calls[0]
        assert "Service \"db\" is using volume \"/data\" from the previous container" in args[0]

        assert [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data']
        assert new_container.get_mount('/data')['Source'] == volume_path

    def test_execute_convergence_plan_when_host_volume_is_removed(self):
        host_path = '/tmp/host-path'
        service = self.create_service(
            'db',
            build={'context': 'tests/fixtures/dockerfile-with-volume'},
            volumes=[VolumeSpec(host_path, '/data', 'rw')])
        old_container = create_and_start_container(service)
        assert (
            [mount['Destination'] for mount in old_container.get('Mounts')] ==
            ['/data']
        )
        service.options['volumes'] = []

        with mock.patch('compose.service.log', autospec=True) as mock_log:
            new_container, = service.execute_convergence_plan(
                ConvergencePlan('recreate', [old_container]))

        assert not mock_log.warn.called
        assert (
            [mount['Destination'] for mount in new_container.get('Mounts')] ==
            ['/data']
        )
        assert new_container.get_mount('/data')['Source'] != host_path

    def test_execute_convergence_plan_anonymous_volume_renew(self):
        service = self.create_service(
            'db',
            image='busybox',
            volumes=[VolumeSpec(None, '/data', 'rw')])
        old_container = create_and_start_container(service)
        assert (
            [mount['Destination'] for mount in old_container.get('Mounts')] ==
            ['/data']
        )
        volume_path = old_container.get_mount('/data')['Source']

        new_container, = service.execute_convergence_plan(
            ConvergencePlan('recreate', [old_container]),
            renew_anonymous_volumes=True
        )

        assert (
            [mount['Destination'] for mount in new_container.get('Mounts')] ==
            ['/data']
        )
        assert new_container.get_mount('/data')['Source'] != volume_path

    def test_execute_convergence_plan_anonymous_volume_recreate_then_renew(self):
        service = self.create_service(
            'db',
            image='busybox',
            volumes=[VolumeSpec(None, '/data', 'rw')])
        old_container = create_and_start_container(service)
        assert (
            [mount['Destination'] for mount in old_container.get('Mounts')] ==
            ['/data']
        )
        volume_path = old_container.get_mount('/data')['Source']

        mid_container, = service.execute_convergence_plan(
            ConvergencePlan('recreate', [old_container]),
        )
        assert (
            [mount['Destination'] for mount in mid_container.get('Mounts')] ==
            ['/data']
        )
        assert mid_container.get_mount('/data')['Source'] == volume_path

        new_container, = service.execute_convergence_plan(
            ConvergencePlan('recreate', [mid_container]),
            renew_anonymous_volumes=True
        )

        assert (
            [mount['Destination'] for mount in new_container.get('Mounts')] ==
            ['/data']
        )
        assert new_container.get_mount('/data')['Source'] != volume_path

    def test_execute_convergence_plan_without_start(self):
        service = self.create_service(
            'db',
            build={'context': 'tests/fixtures/dockerfile-with-volume'}
        )
        containers = service.execute_convergence_plan(ConvergencePlan('create', []), start=False)
        service_containers = service.containers(stopped=True)
        assert len(service_containers) == 1
        assert not service_containers[0].is_running

        containers = service.execute_convergence_plan(
            ConvergencePlan('recreate', containers),
            start=False)
        service_containers = service.containers(stopped=True)
        assert len(service_containers) == 1
        assert not service_containers[0].is_running

        service.execute_convergence_plan(ConvergencePlan('start', containers), start=False)
        service_containers = service.containers(stopped=True)
        assert len(service_containers) == 1
        assert not service_containers[0].is_running

    def test_execute_convergence_plan_image_with_volume_is_removed(self):
        service = self.create_service(
            'db', build={'context': 'tests/fixtures/dockerfile-with-volume'}
        )
        old_container = create_and_start_container(service)
        assert (
            [mount['Destination'] for mount in old_container.get('Mounts')] ==
            ['/data']
        )
        volume_path = old_container.get_mount('/data')['Source']

        old_container.stop()
        self.client.remove_image(service.image(), force=True)

        service.ensure_image_exists()
        with pytest.raises(ImageNotFound):
            service.execute_convergence_plan(
                ConvergencePlan('recreate', [old_container])
            )
        old_container.inspect()  # retrieve new name from server

        new_container, = service.execute_convergence_plan(
            ConvergencePlan('recreate', [old_container]),
            reset_container_image=True
        )
        assert [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data']
        assert new_container.get_mount('/data')['Source'] == volume_path

    def test_start_container_passes_through_options(self):
        db = self.create_service('db')
        create_and_start_container(db, environment={'FOO': 'BAR'})
        assert db.containers()[0].environment['FOO'] == 'BAR'

    def test_start_container_inherits_options_from_constructor(self):
        db = self.create_service('db', environment={'FOO': 'BAR'})
        create_and_start_container(db)
        assert db.containers()[0].environment['FOO'] == 'BAR'

    @no_cluster('No legacy links support in Swarm')
    def test_start_container_creates_links(self):
        db = self.create_service('db')
        web = self.create_service('web', links=[(db, None)])
        db1 = create_and_start_container(db)
        db2 = create_and_start_container(db)
        create_and_start_container(web)
        assert set(get_links(web.containers()[0])) == set([
            db1.name, db1.name_without_project,
            db2.name, db2.name_without_project,
            'db'
        ])

    @no_cluster('No legacy links support in Swarm')
    def test_start_container_creates_links_with_names(self):
        db = self.create_service('db')
        web = self.create_service('web', links=[(db, 'custom_link_name')])
        db1 = create_and_start_container(db)
        db2 = create_and_start_container(db)
        create_and_start_container(web)
        assert set(get_links(web.containers()[0])) == set([
            db1.name, db1.name_without_project,
            db2.name, db2.name_without_project,
            'custom_link_name'
        ])

    @no_cluster('No legacy links support in Swarm')
    def test_start_container_with_external_links(self):
        db = self.create_service('db')
        db_ctnrs = [create_and_start_container(db) for _ in range(3)]
        web = self.create_service(
            'web', external_links=[
                db_ctnrs[0].name,
                db_ctnrs[1].name,
                '{}:db_3'.format(db_ctnrs[2].name)
            ]
        )
        create_and_start_container(web)
        assert set(get_links(web.containers()[0])) == set([
            db_ctnrs[0].name,
            db_ctnrs[1].name,
            'db_3'
        ])

    @no_cluster('No legacy links support in Swarm')
    def test_start_normal_container_does_not_create_links_to_its_own_service(self):
        db = self.create_service('db')
        create_and_start_container(db)
        create_and_start_container(db)
        c = create_and_start_container(db)
        assert set(get_links(c)) == set([])

    @no_cluster('No legacy links support in Swarm')
    def test_start_one_off_container_creates_links_to_its_own_service(self):
        db = self.create_service('db')
        db1 = create_and_start_container(db)
        db2 = create_and_start_container(db)
        c = create_and_start_container(db, one_off=OneOffFilter.only)
        assert set(get_links(c)) == set([
            db1.name, db1.name_without_project,
            db2.name, db2.name_without_project,
            'db'
        ])

    def test_start_container_builds_images(self):
        service = Service(
            name='test',
            client=self.client,
            build={'context': 'tests/fixtures/simple-dockerfile'},
            project='composetest',
        )
        container = create_and_start_container(service)
        container.wait()
        assert b'success' in container.logs()
        assert len(self.client.images(name='composetest_test')) >= 1

    def test_start_container_uses_tagged_image_if_it_exists(self):
        self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
        service = Service(
            name='test',
            client=self.client,
            build={'context': 'this/does/not/exist/and/will/throw/error'},
            project='composetest',
        )
        container = create_and_start_container(service)
        container.wait()
        assert b'success' in container.logs()

    def test_start_container_creates_ports(self):
        service = self.create_service('web', ports=[8000])
        container = create_and_start_container(service).inspect()
        assert list(container['NetworkSettings']['Ports'].keys()) == ['8000/tcp']
        assert container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'] != '8000'

    def test_build(self):
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)
        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write("FROM busybox\n")
        service = self.create_service('web', build={'context': base_dir})
        service.build()
        self.addCleanup(self.client.remove_image, service.image_name)
        assert self.client.inspect_image('composetest_web')

    def test_build_cli(self):
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)
        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write("FROM busybox\n")
        service = self.create_service('web',
                                      build={'context': base_dir},
                                      environment={
                                          'COMPOSE_DOCKER_CLI_BUILD': '1',
                                          'DOCKER_BUILDKIT': '1',
                                      })
        service.build(cli=True)
        self.addCleanup(self.client.remove_image, service.image_name)
        assert self.client.inspect_image('composetest_web')

    def test_build_cli_with_build_labels(self):
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)
        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write("FROM busybox\n")
        service = self.create_service('web',
                                      build={
                                          'context': base_dir,
                                          'labels': {'com.docker.compose.test': 'true'}},
                                      )
        service.build(cli=True)
        self.addCleanup(self.client.remove_image, service.image_name)
        image = self.client.inspect_image('composetest_web')
        assert image['Config']['Labels']['com.docker.compose.test']

    def test_up_build_cli(self):
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)
        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write("FROM busybox\n")
        web = self.create_service('web',
                                  build={'context': base_dir},
                                  environment={
                                      'COMPOSE_DOCKER_CLI_BUILD': '1',
                                      'DOCKER_BUILDKIT': '1',
                                  })
        project = Project('composetest', [web], self.client)
        project.up(do_build=BuildAction.force)
        containers = project.containers(['web'])
        assert len(containers) == 1
        assert containers[0].name.startswith('composetest_web_')

    def test_build_non_ascii_filename(self):
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)
        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write("FROM busybox\n")
        with open(os.path.join(base_dir.encode('utf8'), b'foo\xE2bar'), 'w') as f:
            f.write("hello world\n")
        service = self.create_service('web', build={'context': text_type(base_dir)})
        service.build()
        self.addCleanup(self.client.remove_image, service.image_name)
        assert self.client.inspect_image('composetest_web')

    def test_build_with_image_name(self):
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)
        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write("FROM busybox\n")
        image_name = 'examples/composetest:latest'
        self.addCleanup(self.client.remove_image, image_name)
        self.create_service('web', build={'context': base_dir}, image=image_name).build()
        assert self.client.inspect_image(image_name)

    def test_build_with_git_url(self):
        build_url = "https://github.com/dnephin/docker-build-from-url.git"
        service = self.create_service('buildwithurl', build={'context': build_url})
        self.addCleanup(self.client.remove_image, service.image_name)
        service.build()
        assert service.image()

    def test_build_with_build_args(self):
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)
        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write("FROM busybox\n")
            f.write("ARG build_version\n")
            f.write("RUN echo ${build_version}\n")
        service = self.create_service('buildwithargs',
                                      build={'context': text_type(base_dir),
                                             'args': {"build_version": "1"}})
        service.build()
        self.addCleanup(self.client.remove_image, service.image_name)
        assert service.image()
        assert "build_version=1" in service.image()['ContainerConfig']['Cmd']

    def test_build_with_build_args_override(self):
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)
        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write("FROM busybox\n")
            f.write("ARG build_version\n")
            f.write("RUN echo ${build_version}\n")
        service = self.create_service('buildwithargs',
                                      build={'context': text_type(base_dir),
                                             'args': {"build_version": "1"}})
        service.build(build_args_override={'build_version': '2'})
        self.addCleanup(self.client.remove_image, service.image_name)
        assert service.image()
        assert "build_version=2" in service.image()['ContainerConfig']['Cmd']

    def test_build_with_build_labels(self):
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)
        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write('FROM busybox\n')
        service = self.create_service('buildlabels', build={
            'context': text_type(base_dir),
            'labels': {'com.docker.compose.test': 'true'}
        })
        service.build()
        self.addCleanup(self.client.remove_image, service.image_name)
        assert service.image()
        assert service.image()['Config']['Labels']['com.docker.compose.test'] == 'true'

    @no_cluster('Container networks not on Swarm')
    def test_build_with_network(self):
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)
        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write('FROM busybox\n')
            f.write('RUN ping -c1 google.local\n')
        net_container = self.client.create_container(
            'busybox', 'top', host_config=self.client.create_host_config(
                extra_hosts={'google.local': '127.0.0.1'}
            ), name='composetest_build_network'
        )
        self.addCleanup(self.client.remove_container, net_container, force=True)
        self.client.start(net_container)
        service = self.create_service('buildwithnet', build={
            'context': text_type(base_dir),
            'network': 'container:{}'.format(net_container['Id'])
        })
        service.build()
        self.addCleanup(self.client.remove_image, service.image_name)
        assert service.image()

    @v2_3_only()
    @no_cluster('Not supported on UCP 2.2.0-beta1')  # FIXME: remove once support is added
    def test_build_with_target(self):
        self.require_api_version('1.30')
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)
        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write('FROM busybox as one\n')
            f.write('LABEL com.docker.compose.test=true\n')
            f.write('LABEL com.docker.compose.test.target=one\n')
            f.write('FROM busybox as two\n')
            f.write('LABEL com.docker.compose.test.target=two\n')
        service = self.create_service('buildtarget', build={
            'context': text_type(base_dir),
            'target': 'one'
        })
        service.build()
        assert service.image()
        assert service.image()['Config']['Labels']['com.docker.compose.test.target'] == 'one'

    @v2_3_only()
    def test_build_with_extra_hosts(self):
        self.require_api_version('1.27')
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)
        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write('\n'.join([
                'FROM busybox',
                'RUN ping -c1 foobar',
                'RUN ping -c1 baz',
            ]))
        service = self.create_service('build_extra_hosts', build={
            'context': text_type(base_dir),
            'extra_hosts': {
                'foobar': '127.0.0.1',
                'baz': '127.0.0.1'
            }
        })
        service.build()
        assert service.image()

    def test_build_with_gzip(self):
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)
        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write('\n'.join([
                'FROM busybox',
                'COPY . /src',
                'RUN cat /src/hello.txt'
            ]))
        with open(os.path.join(base_dir, 'hello.txt'), 'w') as f:
            f.write('hello world\n')
        service = self.create_service('build_gzip', build={
            'context': text_type(base_dir),
        })
        service.build(gzip=True)
        assert service.image()

    @v2_1_only()
    def test_build_with_isolation(self):
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)
        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write('FROM busybox\n')
        service = self.create_service('build_isolation', build={
            'context': text_type(base_dir),
            'isolation': 'default',
        })
        service.build()
        assert service.image()

    def test_build_with_illegal_leading_chars(self):
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)
        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write('FROM busybox\nRUN echo "Embodiment of Scarlet Devil"\n')
        service = Service(
            'build_leading_slug', client=self.client,
            project='___-composetest', build={
                'context': text_type(base_dir)
            }
        )
        assert service.image_name == 'composetest_build_leading_slug'
        service.build()
        assert service.image()

    def test_start_container_stays_unprivileged(self):
        service = self.create_service('web')
        container = create_and_start_container(service).inspect()
        assert container['HostConfig']['Privileged'] is False

    def test_start_container_becomes_privileged(self):
        service = self.create_service('web', privileged=True)
        container = create_and_start_container(service).inspect()
        assert container['HostConfig']['Privileged'] is True

    def test_expose_does_not_publish_ports(self):
        service = self.create_service('web', expose=["8000"])
        container = create_and_start_container(service).inspect()
        assert container['NetworkSettings']['Ports'] == {'8000/tcp': None}

    def test_start_container_creates_port_with_explicit_protocol(self):
        service = self.create_service('web', ports=['8000/udp'])
        container = create_and_start_container(service).inspect()
        assert list(container['NetworkSettings']['Ports'].keys()) == ['8000/udp']

    def test_start_container_creates_fixed_external_ports(self):
        service = self.create_service('web', ports=['8000:8000'])
        container = create_and_start_container(service).inspect()
        assert '8000/tcp' in container['NetworkSettings']['Ports']
        assert container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'] == '8000'

    def test_start_container_creates_fixed_external_ports_when_it_is_different_to_internal_port(self):
        service = self.create_service('web', ports=['8001:8000'])
        container = create_and_start_container(service).inspect()
        assert '8000/tcp' in container['NetworkSettings']['Ports']
        assert container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'] == '8001'

    def test_port_with_explicit_interface(self):
        service = self.create_service('web', ports=[
            '127.0.0.1:8001:8000',
            '0.0.0.0:9001:9000/udp',
        ])
        container = create_and_start_container(service).inspect()
        assert container['NetworkSettings']['Ports']['8000/tcp'] == [{
            'HostIp': '127.0.0.1',
            'HostPort': '8001',
        }]
        assert container['NetworkSettings']['Ports']['9000/udp'][0]['HostPort'] == '9001'
        if not is_cluster(self.client):
            assert container['NetworkSettings']['Ports']['9000/udp'][0]['HostIp'] == '0.0.0.0'
        # self.assertEqual(container['NetworkSettings']['Ports'], {
        #     '8000/tcp': [
        #         {
        #             'HostIp': '127.0.0.1',
        #             'HostPort': '8001',
        #         },
        #     ],
        #     '9000/udp': [
        #         {
        #             'HostIp': '0.0.0.0',
        #             'HostPort': '9001',
        #         },
        #     ],
        # })

    def test_create_with_image_id(self):
        pull_busybox(self.client)
        image_id = self.client.inspect_image(BUSYBOX_IMAGE_WITH_TAG)['Id'][:12]
        service = self.create_service('foo', image=image_id)
        service.create_container()

    def test_scale(self):
        service = self.create_service('web')
        service.scale(1)
        assert len(service.containers()) == 1

        # Ensure containers don't have stdout or stdin connected
        container = service.containers()[0]
        config = container.inspect()['Config']
        assert not config['AttachStderr']
        assert not config['AttachStdout']
        assert not config['AttachStdin']

        service.scale(3)
        assert len(service.containers()) == 3
        service.scale(1)
        assert len(service.containers()) == 1
        service.scale(0)
        assert len(service.containers()) == 0

    @pytest.mark.skipif(
        SWARM_SKIP_CONTAINERS_ALL,
        reason='Swarm /containers/json bug'
    )
    def test_scale_with_stopped_containers(self):
        """
        Given there are some stopped containers and scale is called with a
        desired number that is the same as the number of stopped containers,
        test that those containers are restarted and not removed/recreated.
        """
        service = self.create_service('web')
        service.create_container(number=1)
        service.create_container(number=2)
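
        # Reset the cached ParallelStreamWriter so that the progress output from
        # scale() is written to the sys.stderr patched below and can be asserted on.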
        ParallelStreamWriter.instance = None
        with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
            service.scale(2)
        for container in service.containers():
            assert container.is_running
            assert container.number in [1, 2]

        captured_output = mock_stderr.getvalue()
        assert 'Creating' not in captured_output
        assert 'Starting' in captured_output

    def test_scale_with_stopped_containers_and_needing_creation(self):
        """
        Given there are some stopped containers and scale is called with a
        desired number that is greater than the number of stopped containers,
        test that the stopped containers are restarted and the required number
        of new containers is created.
        """
        service = self.create_service('web')
        next_number = service._next_container_number()
        service.create_container(number=next_number, quiet=True)

        for container in service.containers():
            assert not container.is_running
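
        # As above, clear the cached ParallelStreamWriter so scale()'s output goes
        # to the patched sys.stderr.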
        ParallelStreamWriter.instance = None
        with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
            service.scale(2)

        assert len(service.containers()) == 2
        for container in service.containers():
            assert container.is_running

        captured_output = mock_stderr.getvalue()
        assert 'Creating' in captured_output
        assert 'Starting' in captured_output

    def test_scale_with_api_error(self):
        """Test that if the API returns an error while scaling, the error is
        handled and the remaining threads continue.
        """
        service = self.create_service('web')
        next_number = service._next_container_number()
        service.create_container(number=next_number, quiet=True)
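
        # Force Container.create to raise an APIError so scale() logs the failure
        # for each new container and surfaces it as an OperationFailedError.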
        with mock.patch(
            'compose.container.Container.create',
            side_effect=APIError(
                message="testing",
                response={},
                explanation="Boom")):
            with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
                with pytest.raises(OperationFailedError):
                    service.scale(3)

        assert len(service.containers()) == 1
        assert service.containers()[0].is_running
        assert "ERROR: for composetest_web_" in mock_stderr.getvalue()
        assert "Cannot create container for service web: Boom" in mock_stderr.getvalue()

    def test_scale_with_unexpected_exception(self):
        """Test that if scaling raises an error that is not an APIError, the
        error is re-raised.
        """
        service = self.create_service('web')
        next_number = service._next_container_number()
        service.create_container(number=next_number, quiet=True)

        with mock.patch(
            'compose.container.Container.create',
            side_effect=ValueError("BOOM")
        ):
            with pytest.raises(ValueError):
                service.scale(3)

        assert len(service.containers()) == 1
        assert service.containers()[0].is_running

    @mock.patch('compose.service.log')
    def test_scale_with_desired_number_already_achieved(self, mock_log):
        """
        Test that calling scale with a desired number that is equal to the
        number of containers already running results in no change.
        """
        service = self.create_service('web')
        next_number = service._next_container_number()
        container = service.create_container(number=next_number, quiet=True)
        container.start()

        container.inspect()
        assert container.is_running
        assert len(service.containers()) == 1

        service.scale(1)
        assert len(service.containers()) == 1
        container.inspect()
        assert container.is_running

        captured_output = mock_log.info.call_args[0]
        assert 'Desired container number already achieved' in captured_output

    @mock.patch('compose.service.log')
    def test_scale_with_custom_container_name_outputs_warning(self, mock_log):
        """Test that calling scale on a service that has a custom container name
        results in warning output.
        """
        service = self.create_service('app', container_name='custom-container')
        assert service.custom_container_name == 'custom-container'

        with pytest.raises(OperationFailedError):
            service.scale(3)

        captured_output = mock_log.warning.call_args[0][0]

        assert len(service.containers()) == 1
        assert "Remove the custom name to scale the service." in captured_output

    def test_scale_sets_ports(self):
        service = self.create_service('web', ports=['8000'])
        service.scale(2)
        containers = service.containers()
        assert len(containers) == 2
        for container in containers:
            assert list(container.get('HostConfig.PortBindings')) == ['8000/tcp']

    def test_scale_with_immediate_exit(self):
        service = self.create_service('web', image='busybox', command='true')
        service.scale(2)
        assert len(service.containers(stopped=True)) == 2

    def test_network_mode_none(self):
        service = self.create_service('web', network_mode=NetworkMode('none'))
        container = create_and_start_container(service)
        assert container.get('HostConfig.NetworkMode') == 'none'

    def test_network_mode_bridged(self):
        service = self.create_service('web', network_mode=NetworkMode('bridge'))
        container = create_and_start_container(service)
        assert container.get('HostConfig.NetworkMode') == 'bridge'

    def test_network_mode_host(self):
        service = self.create_service('web', network_mode=NetworkMode('host'))
        container = create_and_start_container(service)
        assert container.get('HostConfig.NetworkMode') == 'host'

    def test_pid_mode_none_defined(self):
        service = self.create_service('web', pid_mode=None)
        container = create_and_start_container(service)
        assert container.get('HostConfig.PidMode') == ''

    def test_pid_mode_host(self):
        service = self.create_service('web', pid_mode=PidMode('host'))
        container = create_and_start_container(service)
        assert container.get('HostConfig.PidMode') == 'host'

    @v2_1_only()
    def test_userns_mode_none_defined(self):
        service = self.create_service('web', userns_mode=None)
        container = create_and_start_container(service)
        assert container.get('HostConfig.UsernsMode') == ''

    @v2_1_only()
    def test_userns_mode_host(self):
        service = self.create_service('web', userns_mode='host')
        container = create_and_start_container(service)
        assert container.get('HostConfig.UsernsMode') == 'host'

    def test_dns_no_value(self):
        service = self.create_service('web')
        container = create_and_start_container(service)
        assert container.get('HostConfig.Dns') is None

    def test_dns_list(self):
        service = self.create_service('web', dns=['8.8.8.8', '9.9.9.9'])
        container = create_and_start_container(service)
        assert container.get('HostConfig.Dns') == ['8.8.8.8', '9.9.9.9']

    def test_mem_swappiness(self):
        service = self.create_service('web', mem_swappiness=11)
        container = create_and_start_container(service)
        assert container.get('HostConfig.MemorySwappiness') == 11

    def test_mem_reservation(self):
        service = self.create_service('web', mem_reservation='20m')
        container = create_and_start_container(service)
        assert container.get('HostConfig.MemoryReservation') == 20 * 1024 * 1024

    def test_restart_always_value(self):
        service = self.create_service('web', restart={'Name': 'always'})
        container = create_and_start_container(service)
        assert container.get('HostConfig.RestartPolicy.Name') == 'always'

    def test_oom_score_adj_value(self):
        service = self.create_service('web', oom_score_adj=500)
        container = create_and_start_container(service)
        assert container.get('HostConfig.OomScoreAdj') == 500

    def test_group_add_value(self):
        service = self.create_service('web', group_add=["root", "1"])
        container = create_and_start_container(service)

        host_container_groupadd = container.get('HostConfig.GroupAdd')
        assert "root" in host_container_groupadd
        assert "1" in host_container_groupadd

    def test_dns_opt_value(self):
        service = self.create_service('web', dns_opt=["use-vc", "no-tld-query"])
        container = create_and_start_container(service)

        dns_opt = container.get('HostConfig.DnsOptions')
        assert 'use-vc' in dns_opt
        assert 'no-tld-query' in dns_opt

    def test_restart_on_failure_value(self):
        service = self.create_service('web', restart={
            'Name': 'on-failure',
            'MaximumRetryCount': 5
        })
        container = create_and_start_container(service)
        assert container.get('HostConfig.RestartPolicy.Name') == 'on-failure'
        assert container.get('HostConfig.RestartPolicy.MaximumRetryCount') == 5

    def test_cap_add_list(self):
        service = self.create_service('web', cap_add=['SYS_ADMIN', 'NET_ADMIN'])
        container = create_and_start_container(service)
        assert container.get('HostConfig.CapAdd') == ['SYS_ADMIN', 'NET_ADMIN']

    def test_cap_drop_list(self):
        service = self.create_service('web', cap_drop=['SYS_ADMIN', 'NET_ADMIN'])
        container = create_and_start_container(service)
        assert container.get('HostConfig.CapDrop') == ['SYS_ADMIN', 'NET_ADMIN']

    def test_dns_search(self):
        service = self.create_service('web', dns_search=['dc1.example.com', 'dc2.example.com'])
        container = create_and_start_container(service)
        assert container.get('HostConfig.DnsSearch') == ['dc1.example.com', 'dc2.example.com']

    @v2_only()
    def test_tmpfs(self):
        service = self.create_service('web', tmpfs=['/run'])
        container = create_and_start_container(service)
        assert container.get('HostConfig.Tmpfs') == {'/run': ''}

    def test_working_dir_param(self):
        service = self.create_service('container', working_dir='/working/dir/sample')
        container = service.create_container()
        assert container.get('Config.WorkingDir') == '/working/dir/sample'

    def test_split_env(self):
        service = self.create_service(
            'web',
            environment=['NORMAL=F1', 'CONTAINS_EQUALS=F=2', 'TRAILING_EQUALS='])
        env = create_and_start_container(service).environment
        for k, v in {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''}.items():
            assert env[k] == v

    def test_env_from_file_combined_with_env(self):
        service = self.create_service(
            'web',
            environment=['ONE=1', 'TWO=2', 'THREE=3'],
            env_file=['tests/fixtures/env/one.env', 'tests/fixtures/env/two.env'])
        env = create_and_start_container(service).environment
        for k, v in {
            'ONE': '1',
            'TWO': '2',
            'THREE': '3',
            'FOO': 'baz',
            'DOO': 'dah'
        }.items():
            assert env[k] == v

    @v3_only()
    def test_build_with_cachefrom(self):
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)

        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write("FROM busybox\n")

        service = self.create_service('cache_from',
                                      build={'context': base_dir,
                                             'cache_from': ['build1']})
        service.build()
        self.addCleanup(self.client.remove_image, service.image_name)
        assert service.image()
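
    # mock.patch.dict with no replacement values snapshots os.environ and restores
    # it when the test finishes, so the variables set below do not leak.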
    @mock.patch.dict(os.environ)
    def test_resolve_env(self):
        os.environ['FILE_DEF'] = 'E1'
        os.environ['FILE_DEF_EMPTY'] = 'E2'
        os.environ['ENV_DEF'] = 'E3'
        service = self.create_service(
            'web',
            environment={
                'FILE_DEF': 'F1',
                'FILE_DEF_EMPTY': '',
                'ENV_DEF': None,
                'NO_DEF': None
            }
        )
        env = create_and_start_container(service).environment
        for k, v in {
            'FILE_DEF': 'F1',
            'FILE_DEF_EMPTY': '',
            'ENV_DEF': 'E3',
            'NO_DEF': None
        }.items():
            assert env[k] == v

    def test_with_high_enough_api_version_we_get_default_network_mode(self):
        # TODO: remove this test once minimum docker version is 1.8.x
        with mock.patch.object(self.client, '_version', '1.20'):
            service = self.create_service('web')
            service_config = service._get_container_host_config({})
            assert service_config['NetworkMode'] == 'default'

    def test_labels(self):
        labels_dict = {
            'com.example.description': "Accounting webapp",
            'com.example.department': "Finance",
            'com.example.label-with-empty-value': "",
        }

        compose_labels = {
            LABEL_ONE_OFF: 'False',
            LABEL_PROJECT: 'composetest',
            LABEL_SERVICE: 'web',
            LABEL_VERSION: __version__,
            LABEL_CONTAINER_NUMBER: '1'
        }
        expected = dict(labels_dict, **compose_labels)

        service = self.create_service('web', labels=labels_dict)
        ctnr = create_and_start_container(service)
        labels = ctnr.labels.items()
        for pair in expected.items():
            assert pair in labels

    def test_empty_labels(self):
        labels_dict = {'foo': '', 'bar': ''}
        service = self.create_service('web', labels=labels_dict)
        labels = create_and_start_container(service).labels.items()
        for name in labels_dict:
            assert (name, '') in labels

    def test_stop_signal(self):
        stop_signal = 'SIGINT'
        service = self.create_service('web', stop_signal=stop_signal)
        container = create_and_start_container(service)
        assert container.stop_signal == stop_signal

    def test_custom_container_name(self):
        service = self.create_service('web', container_name='my-web-container')
        assert service.custom_container_name == 'my-web-container'

        container = create_and_start_container(service)
        assert container.name == 'my-web-container'

        one_off_container = service.create_container(one_off=True)
        assert one_off_container.name != 'my-web-container'

    @pytest.mark.skipif(True, reason="Broken on 1.11.0 - 17.03.0")
    def test_log_drive_invalid(self):
        service = self.create_service('web', logging={'driver': 'xxx'})
        expected_error_msg = "logger: no log driver named 'xxx' is registered"

        with pytest.raises(APIError) as excinfo:
            create_and_start_container(service)
        # re.search needs a string, so search the stringified exception
        assert re.search(expected_error_msg, str(excinfo.value))

    def test_log_drive_empty_default_jsonfile(self):
        service = self.create_service('web')
        log_config = create_and_start_container(service).log_config

        assert 'json-file' == log_config['Type']
        assert not log_config['Config']

    def test_log_drive_none(self):
        service = self.create_service('web', logging={'driver': 'none'})
        log_config = create_and_start_container(service).log_config

        assert 'none' == log_config['Type']
        assert not log_config['Config']

    def test_devices(self):
        service = self.create_service('web', devices=["/dev/random:/dev/mapped-random"])
        device_config = create_and_start_container(service).get('HostConfig.Devices')

        device_dict = {
            'PathOnHost': '/dev/random',
            'CgroupPermissions': 'rwm',
            'PathInContainer': '/dev/mapped-random'
        }

        assert 1 == len(device_config)
        assert device_dict == device_config[0]

    def test_duplicate_containers(self):
        service = self.create_service('web')

        options = service._get_container_create_options({}, service._next_container_number())
        original = Container.create(service.client, **options)

        assert set(service.containers(stopped=True)) == set([original])
        assert set(service.duplicate_containers()) == set()
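
        # A second container created from the same create options but under a
        # different name should be reported by duplicate_containers().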
        options['name'] = 'temporary_container_name'
        duplicate = Container.create(service.client, **options)

        assert set(service.containers(stopped=True)) == set([original, duplicate])
        assert set(service.duplicate_containers()) == set([duplicate])


def converge(service, strategy=ConvergenceStrategy.changed):
    """Create a converge plan from a strategy and execute the plan."""
    plan = service.convergence_plan(strategy)
    return service.execute_convergence_plan(plan, timeout=1)


class ConfigHashTest(DockerClientTestCase):

    def test_no_config_hash_when_one_off(self):
        web = self.create_service('web')
        container = web.create_container(one_off=True)
        assert LABEL_CONFIG_HASH not in container.labels

    def test_no_config_hash_when_overriding_options(self):
        web = self.create_service('web')
        container = web.create_container(environment={'FOO': '1'})
        assert LABEL_CONFIG_HASH not in container.labels

    def test_config_hash_with_custom_labels(self):
        web = self.create_service('web', labels={'foo': '1'})
        container = converge(web)[0]
        assert LABEL_CONFIG_HASH in container.labels
        assert 'foo' in container.labels

    def test_config_hash_sticks_around(self):
        web = self.create_service('web', command=["top"])
        container = converge(web)[0]
        assert LABEL_CONFIG_HASH in container.labels

        web = self.create_service('web', command=["top", "-d", "1"])
        container = converge(web)[0]
        assert LABEL_CONFIG_HASH in container.labels