@@ -378,16 +378,12 @@ class Service(object):
             self.start_container(container)
             return container

-        containers, errors = parallel_execute(
+        return parallel_execute(
             range(i, i + scale),
             lambda n: create_and_start(self, n),
             lambda n: self.get_container_name(n),
             "Creating"
-        )
-        for error in errors.values():
-            raise OperationFailedError(error)
-
-        return containers
+        )[0]

     def _execute_convergence_recreate(self, containers, scale, timeout, detached, start):
         if len(containers) > scale:
@@ -399,15 +395,12 @@ class Service(object):
                 container, timeout=timeout, attach_logs=not detached,
                 start_new_container=start
             )
-        containers, errors = parallel_execute(
+        containers = parallel_execute(
             containers,
             recreate,
             lambda c: c.name,
             "Recreating"
-        )
-        for error in errors.values():
-            raise OperationFailedError(error)
-
+        )[0]
         if len(containers) < scale:
             containers.extend(self._execute_convergence_create(
                 scale - len(containers), detached, start
@@ -419,16 +412,13 @@ class Service(object):
         self._downscale(containers[scale:], timeout)
         containers = containers[:scale]
         if start:
-            _, errors = parallel_execute(
+            parallel_execute(
                 containers,
                 lambda c: self.start_container_if_stopped(c, attach_logs=not detached),
                 lambda c: c.name,
                 "Starting"
             )
-        for error in errors.values():
-            raise OperationFailedError(error)
-
         if len(containers) < scale:
             containers.extend(self._execute_convergence_create(
                 scale - len(containers), detached, start