from __future__ import absolute_import
from __future__ import unicode_literals

import logging
import operator
import sys
from threading import Semaphore
from threading import Thread

from docker.errors import APIError
from six.moves import _thread as thread
from six.moves.queue import Empty
from six.moves.queue import Queue

from compose.cli.colors import green
from compose.cli.colors import red
from compose.cli.signals import ShutdownException
from compose.errors import HealthCheckFailed
from compose.errors import NoHealthCheckConfigured
from compose.errors import OperationFailedError
from compose.utils import get_output_stream

log = logging.getLogger(__name__)

STOP = object()


def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
    """Runs func on objects in parallel while ensuring that func is
    run on an object only after it has been run on all of its dependencies.

    get_deps called on an object must return a collection of its dependencies.
    get_name called on an object must return its name.
    """
    objects = list(objects)
    stream = get_output_stream(sys.stderr)

    writer = ParallelStreamWriter(stream, msg)
    for obj in objects:
        writer.initialize(get_name(obj))

    events = parallel_execute_iter(objects, func, get_deps, limit)

    errors = {}
    results = []
    error_to_reraise = None

    for obj, result, exception in events:
        if exception is None:
            writer.write(get_name(obj), green('done'))
            results.append(result)
        elif isinstance(exception, APIError):
            errors[get_name(obj)] = exception.explanation
            writer.write(get_name(obj), red('error'))
        elif isinstance(exception, (OperationFailedError, HealthCheckFailed, NoHealthCheckConfigured)):
            errors[get_name(obj)] = exception.msg
            writer.write(get_name(obj), red('error'))
        elif isinstance(exception, UpstreamError):
            writer.write(get_name(obj), red('error'))
        else:
            errors[get_name(obj)] = exception
            error_to_reraise = exception

    for obj_name, error in errors.items():
        stream.write("\nERROR: for {}  {}\n".format(obj_name, error))

    if error_to_reraise:
        raise error_to_reraise

    return results, errors


def _no_deps(x):
    return []


class State(object):
    """
    Holds the state of a partially-complete parallel operation.
    state.started:   objects being processed
    state.finished:  objects which have been processed
    state.failed:    objects which either failed or whose dependencies failed
    """

    def __init__(self, objects):
        self.objects = objects
        self.started = set()
        self.finished = set()
        self.failed = set()

    def is_done(self):
        return len(self.finished) + len(self.failed) >= len(self.objects)

    def pending(self):
        return set(self.objects) - self.started - self.finished - self.failed


class NoLimit(object):
    def __enter__(self):
        pass

    def __exit__(self, *ex):
        pass


def parallel_execute_iter(objects, func, get_deps, limit):
    """
    Runs func on objects in parallel while ensuring that func is
    run on an object only after it has been run on all of its dependencies.

    Returns an iterator of tuples which look like:

    # if func returned normally when run on object
    (object, result, None)

    # if func raised an exception when run on object
    (object, None, exception)

    # if func raised an exception when run on one of object's dependencies
    (object, None, UpstreamError())
    """
    if get_deps is None:
        get_deps = _no_deps

    if limit is None:
        limiter = NoLimit()
    else:
        limiter = Semaphore(limit)

    results = Queue()
    state = State(objects)

    while True:
        feed_queue(objects, func, get_deps, results, state, limiter)

        try:
            event = results.get(timeout=0.1)
        except Empty:
            continue
        # See https://github.com/docker/compose/issues/189
        except thread.error:
            raise ShutdownException()

        if event is STOP:
            break

        obj, _, exception = event
        if exception is None:
            log.debug('Finished processing: {}'.format(obj))
            state.finished.add(obj)
        else:
            log.debug('Failed: {}'.format(obj))
            state.failed.add(obj)

        yield event


def producer(obj, func, results, limiter):
    """
    The entry point for a producer thread which runs func on a single object.
    Places a tuple on the results queue once func has either returned or raised.
    """
    with limiter:
        try:
            result = func(obj)
            results.put((obj, result, None))
        except Exception as e:
            results.put((obj, None, e))


def feed_queue(objects, func, get_deps, results, state, limiter):
    """
    Starts producer threads for any objects which are ready to be processed
    (i.e. all of their dependencies have already been processed successfully).

    Shortcuts any objects whose dependencies have failed and places an
    (object, None, UpstreamError()) tuple on the results queue.
"""    pending = state.pending()    log.debug('Pending: {}'.format(pending))    for obj in pending:        deps = get_deps(obj)        try:            if any(dep[0] in state.failed for dep in deps):                log.debug('{} has upstream errors - not processing'.format(obj))                results.put((obj, None, UpstreamError()))                state.failed.add(obj)            elif all(                dep not in objects or (                    dep in state.finished and (not ready_check or ready_check(dep))                ) for dep, ready_check in deps            ):                log.debug('Starting producer thread for {}'.format(obj))                t = Thread(target=producer, args=(obj, func, results, limiter))                t.daemon = True                t.start()                state.started.add(obj)        except (HealthCheckFailed, NoHealthCheckConfigured) as e:            log.debug(                'Healthcheck for service(s) upstream of {} failed - '                'not processing'.format(obj)            )            results.put((obj, None, e))    if state.is_done():        results.put(STOP)class UpstreamError(Exception):    passclass ParallelStreamWriter(object):    """Write out messages for operations happening in parallel.    Each operation has its own line, and ANSI code characters are used    to jump to the correct line, and write over the line.    """    def __init__(self, stream, msg):        self.stream = stream        self.msg = msg        self.lines = []    def initialize(self, obj_index):        if self.msg is None:            return        self.lines.append(obj_index)        self.stream.write("{} {} ... \r\n".format(self.msg, obj_index))        self.stream.flush()    def write(self, obj_index, status):        if self.msg is None:            return        position = self.lines.index(obj_index)        diff = len(self.lines) - position        # move up        self.stream.write("%c[%dA" % (27, diff))        # erase        self.stream.write("%c[2K\r" % 27)        self.stream.write("{} {} ... {}\r".format(self.msg, obj_index, status))        # move back down        self.stream.write("%c[%dB" % (27, diff))        self.stream.flush()def parallel_operation(containers, operation, options, message):    parallel_execute(        containers,        operator.methodcaller(operation, **options),        operator.attrgetter('name'),        message)def parallel_remove(containers, options):    stopped_containers = [c for c in containers if not c.is_running]    parallel_operation(stopped_containers, 'remove', options, 'Removing')def parallel_pause(containers, options):    parallel_operation(containers, 'pause', options, 'Pausing')def parallel_unpause(containers, options):    parallel_operation(containers, 'unpause', options, 'Unpausing')def parallel_kill(containers, options):    parallel_operation(containers, 'kill', options, 'Killing')