// monitor.go
  1. /*
  2. Copyright 2020 Docker Compose CLI authors
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package compose
  14. import (
  15. "context"
  16. "strconv"
  17. "github.com/compose-spec/compose-go/v2/types"
  18. "github.com/containerd/errdefs"
  19. "github.com/docker/docker/api/types/container"
  20. "github.com/docker/docker/api/types/events"
  21. "github.com/docker/docker/api/types/filters"
  22. "github.com/docker/docker/client"
  23. "github.com/sirupsen/logrus"
  24. "github.com/docker/compose/v2/pkg/api"
  25. "github.com/docker/compose/v2/pkg/utils"
  26. )
// monitor watches the Docker events stream for one Compose application
// and fans relevant container lifecycle events out to registered listeners.
type monitor struct {
	// api is the Docker Engine client used to list containers,
	// subscribe to events, and inspect containers on exit.
	api client.APIClient
	// project is the Compose application model; its Name scopes the
	// container and event filters used by Start.
	project *types.Project
	// services tells us which service to consider and those we can ignore, maybe ran by a concurrent compose command
	services map[string]bool
	// listeners receive every relevant container event observed by Start.
	listeners []api.ContainerEventListener
}
  34. func newMonitor(api client.APIClient, project *types.Project) *monitor {
  35. services := map[string]bool{}
  36. if project != nil {
  37. for name := range project.Services {
  38. services[name] = true
  39. }
  40. }
  41. return &monitor{
  42. api: api,
  43. project: project,
  44. services: services,
  45. }
  46. }
  47. // Start runs monitor to detect application events and return after termination
  48. //
  49. //nolint:gocyclo
  50. func (c *monitor) Start(ctx context.Context) error {
  51. // collect initial application container
  52. initialState, err := c.api.ContainerList(ctx, container.ListOptions{
  53. All: true,
  54. Filters: filters.NewArgs(
  55. projectFilter(c.project.Name),
  56. oneOffFilter(false),
  57. hasConfigHashLabel(),
  58. ),
  59. })
  60. if err != nil {
  61. return err
  62. }
  63. // containers is the set if container IDs the application is based on
  64. containers := utils.Set[string]{}
  65. for _, ctr := range initialState {
  66. if len(c.services) == 0 || c.services[ctr.Labels[api.ServiceLabel]] {
  67. containers.Add(ctr.ID)
  68. }
  69. }
  70. restarting := utils.Set[string]{}
  71. evtCh, errCh := c.api.Events(ctx, events.ListOptions{
  72. Filters: filters.NewArgs(
  73. filters.Arg("type", "container"),
  74. projectFilter(c.project.Name)),
  75. })
  76. for {
  77. select {
  78. case <-ctx.Done():
  79. return nil
  80. case err := <-errCh:
  81. return err
  82. case event := <-evtCh:
  83. if !c.services[event.Actor.Attributes[api.ServiceLabel]] {
  84. continue
  85. }
  86. ctr, err := c.getContainerSummary(event)
  87. if err != nil {
  88. return err
  89. }
  90. switch event.Action {
  91. case events.ActionCreate:
  92. containers.Add(ctr.ID)
  93. for _, listener := range c.listeners {
  94. listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventCreated))
  95. }
  96. logrus.Debugf("container %s created", ctr.Name)
  97. case events.ActionStart:
  98. restarted := restarting.Has(ctr.ID)
  99. for _, listener := range c.listeners {
  100. listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventStarted, func(e *api.ContainerEvent) {
  101. e.Restarting = restarted
  102. }))
  103. }
  104. if restarted {
  105. logrus.Debugf("container %s restarted", ctr.Name)
  106. } else {
  107. logrus.Debugf("container %s started", ctr.Name)
  108. }
  109. containers.Add(ctr.ID)
  110. case events.ActionRestart:
  111. for _, listener := range c.listeners {
  112. listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventRestarted))
  113. }
  114. logrus.Debugf("container %s restarted", ctr.Name)
  115. case events.ActionStop:
  116. // when a container is in restarting phase, and we stop the application (abort-on-container-exit)
  117. // we won't get any additional start+die events, just this stop as a proof container is down
  118. logrus.Debugf("container %s stopped", ctr.Name)
  119. containers.Remove(ctr.ID)
  120. case events.ActionDie:
  121. logrus.Debugf("container %s exited with code %d", ctr.Name, ctr.ExitCode)
  122. inspect, err := c.api.ContainerInspect(ctx, event.Actor.ID)
  123. if errdefs.IsNotFound(err) {
  124. // Source is already removed
  125. } else if err != nil {
  126. return err
  127. }
  128. if inspect.State != nil && inspect.State.Restarting || inspect.State.Running {
  129. // State.Restarting is set by engine when container is configured to restart on exit
  130. // on ContainerRestart it doesn't (see https://github.com/moby/moby/issues/45538)
  131. // container state still is reported as "running"
  132. logrus.Debugf("container %s is restarting", ctr.Name)
  133. restarting.Add(ctr.ID)
  134. for _, listener := range c.listeners {
  135. listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventExited, func(e *api.ContainerEvent) {
  136. e.Restarting = true
  137. }))
  138. }
  139. } else {
  140. for _, listener := range c.listeners {
  141. listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventExited))
  142. }
  143. containers.Remove(ctr.ID)
  144. }
  145. }
  146. }
  147. if len(containers) == 0 {
  148. return nil
  149. }
  150. }
  151. }
  152. func newContainerEvent(timeNano int64, ctr *api.ContainerSummary, eventType int, opts ...func(e *api.ContainerEvent)) api.ContainerEvent {
  153. name := ctr.Name
  154. defaultName := getDefaultContainerName(ctr.Project, ctr.Labels[api.ServiceLabel], ctr.Labels[api.ContainerNumberLabel])
  155. if name == defaultName {
  156. // remove project- prefix
  157. name = name[len(ctr.Project)+1:]
  158. }
  159. event := api.ContainerEvent{
  160. Type: eventType,
  161. Container: ctr,
  162. Time: timeNano,
  163. Source: name,
  164. ID: ctr.ID,
  165. Service: ctr.Service,
  166. ExitCode: ctr.ExitCode,
  167. }
  168. for _, opt := range opts {
  169. opt(&event)
  170. }
  171. return event
  172. }
  173. func (c *monitor) getContainerSummary(event events.Message) (*api.ContainerSummary, error) {
  174. ctr := &api.ContainerSummary{
  175. ID: event.Actor.ID,
  176. Name: event.Actor.Attributes["name"],
  177. Project: c.project.Name,
  178. Service: event.Actor.Attributes[api.ServiceLabel],
  179. Labels: event.Actor.Attributes, // More than just labels, but that'c the closest the API gives us
  180. }
  181. if ec, ok := event.Actor.Attributes["exitCode"]; ok {
  182. exitCode, err := strconv.Atoi(ec)
  183. if err != nil {
  184. return nil, err
  185. }
  186. ctr.ExitCode = exitCode
  187. }
  188. return ctr, nil
  189. }
  190. func (c *monitor) withListener(listener api.ContainerEventListener) {
  191. c.listeners = append(c.listeners, listener)
  192. }