monitor.go

/*
   Copyright 2020 Docker Compose CLI authors

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package compose

import (
	"context"
	"strconv"

	"github.com/containerd/errdefs"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
	"github.com/sirupsen/logrus"

	"github.com/docker/compose/v2/pkg/api"
	"github.com/docker/compose/v2/pkg/utils"
)

type monitor struct {
	api     client.APIClient
	project string
	// services tells us which services to consider and which we can ignore,
	// the latter possibly run by a concurrent compose command
	services  map[string]bool
	listeners []api.ContainerEventListener
}

func newMonitor(api client.APIClient, project string) *monitor {
	return &monitor{
		api:      api,
		project:  project,
		services: map[string]bool{},
	}
}

func (c *monitor) withServices(services []string) {
	for _, name := range services {
		c.services[name] = true
	}
}
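
// Illustrative example (not part of the original file; the service names are
// hypothetical): limiting the monitor to a subset of services so that containers
// run by a concurrent compose command are ignored.
//
//	m := newMonitor(apiClient, "myproject")
//	m.withServices([]string{"web", "db"}) // events for other services won't be reported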

// Start runs the monitor to detect application events and returns after termination
//
//nolint:gocyclo
func (c *monitor) Start(ctx context.Context) error {
	// collect the initial application containers
	initialState, err := c.api.ContainerList(ctx, container.ListOptions{
		All: true,
		Filters: filters.NewArgs(
			projectFilter(c.project),
			oneOffFilter(false),
			hasConfigHashLabel(),
		),
	})
	if err != nil {
		return err
	}

	// containers is the set of container IDs the application is based on
	containers := utils.Set[string]{}
	for _, ctr := range initialState {
		if len(c.services) == 0 || c.services[ctr.Labels[api.ServiceLabel]] {
			containers.Add(ctr.ID)
		}
	}

	restarting := utils.Set[string]{}
	evtCh, errCh := c.api.Events(ctx, events.ListOptions{
		Filters: filters.NewArgs(
			filters.Arg("type", "container"),
			projectFilter(c.project)),
	})

	for {
		if len(containers) == 0 {
			return nil
		}
		select {
		case <-ctx.Done():
			return nil
		case err := <-errCh:
			return err
		case event := <-evtCh:
			if len(c.services) > 0 && !c.services[event.Actor.Attributes[api.ServiceLabel]] {
				continue
			}
			ctr, err := c.getContainerSummary(event)
			if err != nil {
				return err
			}

			switch event.Action {
			case events.ActionCreate:
				if len(c.services) == 0 || c.services[ctr.Labels[api.ServiceLabel]] {
					containers.Add(ctr.ID)
				}
				evtType := api.ContainerEventCreated
				if _, ok := ctr.Labels[api.ContainerReplaceLabel]; ok {
					evtType = api.ContainerEventRecreated
				}
				for _, listener := range c.listeners {
					listener(newContainerEvent(event.TimeNano, ctr, evtType))
				}
				logrus.Debugf("container %s created", ctr.Name)
			case events.ActionStart:
				restarted := restarting.Has(ctr.ID)
				if restarted {
					logrus.Debugf("container %s restarted", ctr.Name)
					for _, listener := range c.listeners {
						listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventStarted, func(e *api.ContainerEvent) {
							e.Restarting = restarted
						}))
					}
				} else {
					logrus.Debugf("container %s started", ctr.Name)
					for _, listener := range c.listeners {
						listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventStarted))
					}
				}
				if len(c.services) == 0 || c.services[ctr.Labels[api.ServiceLabel]] {
					containers.Add(ctr.ID)
				}
			case events.ActionRestart:
				for _, listener := range c.listeners {
					listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventRestarted))
				}
				logrus.Debugf("container %s restarted", ctr.Name)
			case events.ActionStop:
				// when a container is in its restarting phase and we stop the application (abort-on-container-exit),
				// we won't get any additional start+die events, just this stop as proof the container is down
				logrus.Debugf("container %s stopped", ctr.Name)
				containers.Remove(ctr.ID)
			case events.ActionDie:
				logrus.Debugf("container %s exited with code %d", ctr.Name, ctr.ExitCode)
				inspect, err := c.api.ContainerInspect(ctx, event.Actor.ID)
				if errdefs.IsNotFound(err) {
					// source is already removed
				} else if err != nil {
					return err
				}
				if inspect.State != nil && (inspect.State.Restarting || inspect.State.Running) {
					// State.Restarting is set by the engine when the container is configured to restart on exit;
					// it isn't set on ContainerRestart (see https://github.com/moby/moby/issues/45538),
					// where the container state is still reported as "running"
					logrus.Debugf("container %s is restarting", ctr.Name)
					restarting.Add(ctr.ID)
					for _, listener := range c.listeners {
						listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventExited, func(e *api.ContainerEvent) {
							e.Restarting = true
						}))
					}
				} else {
					for _, listener := range c.listeners {
						listener(newContainerEvent(event.TimeNano, ctr, api.ContainerEventExited))
					}
					containers.Remove(ctr.ID)
				}
			}
		}
	}
}

func newContainerEvent(timeNano int64, ctr *api.ContainerSummary, eventType int, opts ...func(e *api.ContainerEvent)) api.ContainerEvent {
	name := ctr.Name
	defaultName := getDefaultContainerName(ctr.Project, ctr.Labels[api.ServiceLabel], ctr.Labels[api.ContainerNumberLabel])
	if name == defaultName {
		// remove project- prefix
		name = name[len(ctr.Project)+1:]
	}
	event := api.ContainerEvent{
		Type:      eventType,
		Container: ctr,
		Time:      timeNano,
		Source:    name,
		ID:        ctr.ID,
		Service:   ctr.Service,
		ExitCode:  ctr.ExitCode,
	}
	for _, opt := range opts {
		opt(&event)
	}
	return event
}
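
// Illustrative example (not part of the original file): the variadic opts let the
// event loop above decorate an event before dispatching it, e.g. to flag that the
// exited container is about to restart, mirroring the call made in Start.
//
//	evt := newContainerEvent(event.TimeNano, ctr, api.ContainerEventExited, func(e *api.ContainerEvent) {
//		e.Restarting = true
//	})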

func (c *monitor) getContainerSummary(event events.Message) (*api.ContainerSummary, error) {
	ctr := &api.ContainerSummary{
		ID:      event.Actor.ID,
		Name:    event.Actor.Attributes["name"],
		Project: c.project,
		Service: event.Actor.Attributes[api.ServiceLabel],
		Labels:  event.Actor.Attributes, // more than just labels, but that's the closest the API gives us
	}
	if ec, ok := event.Actor.Attributes["exitCode"]; ok {
		exitCode, err := strconv.Atoi(ec)
		if err != nil {
			return nil, err
		}
		ctr.ExitCode = exitCode
	}
	return ctr, nil
}

func (c *monitor) withListener(listener api.ContainerEventListener) {
	c.listeners = append(c.listeners, listener)
}
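
// Illustrative usage sketch (not part of the original file; the calling code is an
// assumption for illustration, not how compose itself wires the monitor):
//
//	m := newMonitor(dockerClient, projectName)
//	m.withServices([]string{"web"}) // optional: restrict to selected services
//	m.withListener(func(evt api.ContainerEvent) {
//		logrus.Debugf("event %d on container %s", evt.Type, evt.Source)
//	})
//	// Start blocks until the context is cancelled or no tracked container is left.
//	err := m.Start(ctx)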