docker: sync openEuler-22.03-LTS-Next with openEuler-22.03-LTS

This commit is contained in:
chenjiankun 2022-09-15 16:58:10 +08:00
parent e6a2315a4e
commit 7fed8d00d0
45 changed files with 2705 additions and 11 deletions

View File

@@ -1 +1 @@
18.09.0.118
18.09.0.309

View File

@@ -1,6 +1,6 @@
Name: docker-engine
Version: 18.09.0
Release: 119
Release: 309
Summary: The open-source application container engine
Group: Tools/Docker
@@ -166,9 +166,6 @@ install -p -m 644 components/engine/contrib/syntax/nano/Dockerfile.nanorc $RPM_B
/usr/share/zsh/vendor-completions/_docker
/usr/share/fish/vendor_completions.d/docker.fish
%doc
#/%{_mandir}/man1/*
#/%{_mandir}/man5/*
#/%{_mandir}/man8/*
%config(noreplace,missingok) /etc/sysconfig/docker
%config(noreplace,missingok) /etc/sysconfig/docker-storage
@@ -215,6 +212,72 @@ fi
%endif
%changelog
* Thu Sep 15 2022 chenjiankun<chenjiankun1@huawei.com> - 18.09.0-309
- Type:CVE
- CVE:CVE-2022-36109
- SUG:NA
- DESC:fix CVE-2022-36109
* Tue Sep 13 2022 chenjiankun<chenjiankun1@huawei.com> - 18.09.0-308
- Type:bugfix
- CVE:NA
- SUG:NA
- DESC:Add an ExitPid field for State struct to record exit process id
* Tue Sep 13 2022 chenjiankun<chenjiankun1@huawei.com> - 18.09.0-307
- Type:bugfix
- CVE:NA
- SUG:NA
- DESC:fix terminal abnormal after docker run
* Wed Jun 29 2022 zjw<zhongjiawei1@huawei.com> - 18.09.0-306
- Type:CVE
- CVE:CVE-2021-41092
- SUG:NA
- DESC:fix CVE-2021-41092
* Wed Jun 29 2022 zjw<zhongjiawei1@huawei.com> - 18.09.0-305
- Type:CVE
- CVE:CVE-2021-41091
- SUG:NA
- DESC:fix CVE-2021-41091
* Wed Jun 29 2022 zjw<zhongjiawei1@huawei.com> - 18.09.0-304
- Type:CVE
- CVE:CVE-2021-41089
- SUG:NA
- DESC:fix CVE-2021-41089
* Wed Jun 29 2022 zjw<zhongjiawei1@huawei.com> - 18.09.0-303
- Type:bugfix
- CVE:NA
- SUG:NA
- DESC:close channel in write side to avoid panic in docker stats
* Tue Jun 28 2022 zjw<zhongjiawei1@huawei.com> - 18.09.0-302
- Type:bugfix
- CVE:NA
- SUG:NA
- DESC:fix status inconsistent after restart container
* Thu Jun 16 2022 duyiwei <duyiwei@kylinos.cn> - 18.09.0-301
- Type:bugfix
- CVE:CVE-2022-24769
- SUG:NA
- DESC:fix CVE-2022-24769
* Tue Mar 22 2022 chenjiankun<chenjiankun1@huawei.com> - 18.09.0-300
- Type:bugfix
- CVE:NA
- SUG:NA
- DESC:sync from internal
* Wed Mar 02 2022 chenjiankun<chenjiankun1@huawei.com> - 18.09.0-120
- Type:bugfix
- CVE:NA
- SUG:NA
- DESC:Use original process spec for execs
* Tue Dec 28 2021 chenjiankun<chenjiankun1@huawei.com> - 18.09.0-119
- Type:bugfix
- CVE:NA
@@ -261,14 +324,14 @@ fi
- SUG:restart
- DESC:remove go-md2man build require
* Mon Jan 4 2021 yangyanchao<yangyanchao6@huawei.com> - 18.09.0-111
* Mon Jan 18 2021 yangyanchao<yangyanchao6@huawei.com> - 18.09.0-111
- Type:requirement
- ID:NA
- CVE:NA
- SUG:restart
- docker:components:add config files for riscv
* Mon Jan 18 2021 jingrui<jingrui@huawei.com> - 18.09.0-107
* Mon Jan 4 2021 jingrui<jingrui@huawei.com> - 18.09.0-107
- Type:bugfix
- ID:NA
- SUG:NA

View File

@@ -1 +1 @@
af8d88876dfcaa318a93a1a410ab9878bfb1255b
1f53e790e570d524f6ebf5b81c914ddda97f0924

View File

@@ -0,0 +1,27 @@
From 9bc663c3332937cdb55aa5e31957678fe605b168 Mon Sep 17 00:00:00 2001
From: xiangrenzhi <xiangrenzhi@huawei.com>
Date: Thu, 25 Feb 2021 09:27:42 +0800
Subject: [PATCH] docker: fix images filter when use multi reference filter
Signed-off-by: xiangrenzhi <xiangrenzhi@huawei.com>
---
components/engine/daemon/images/images.go | 3 +++
1 file changed, 3 insertions(+)
diff --git a/components/engine/daemon/images/images.go b/components/engine/daemon/images/images.go
index 49212341c..94e0c1eb8 100644
--- a/components/engine/daemon/images/images.go
+++ b/components/engine/daemon/images/images.go
@@ -152,6 +152,9 @@ func (i *ImageService) Images(imageFilters filters.Args, all bool, withExtraAttr
if matchErr != nil {
return nil, matchErr
}
+ if found {
+ break
+ }
}
if !found {
continue
--
2.19.1

View File

@@ -0,0 +1,26 @@
From ac36676aac3f2dfca8e1ac31115417919b9e0160 Mon Sep 17 00:00:00 2001
From: xiangrenzhi <xiangrenzhi@huawei.com>
Date: Thu, 25 Feb 2021 09:37:29 +0800
Subject: [PATCH] docker: fix docker rmi stucking
Signed-off-by: xiangrenzhi <xiangrenzhi@huawei.com>
---
components/engine/daemon/images/image_delete.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/components/engine/daemon/images/image_delete.go b/components/engine/daemon/images/image_delete.go
index 94d6f872d..fbd6c16b7 100644
--- a/components/engine/daemon/images/image_delete.go
+++ b/components/engine/daemon/images/image_delete.go
@@ -369,7 +369,7 @@ func (i *ImageService) checkImageDeleteConflict(imgID image.ID, mask conflictTyp
if mask&conflictRunningContainer != 0 {
// Check if any running container is using the image.
running := func(c *container.Container) bool {
- return c.IsRunning() && c.ImageID == imgID
+ return c.ImageID == imgID && c.IsRunning()
}
if container := i.containers.First(running); container != nil {
return &imageDeleteConflict{
--
2.19.1

View File

@@ -0,0 +1,35 @@
From a0a85fc867a59c1ae7b6f4a36b624224dfdedeea Mon Sep 17 00:00:00 2001
From: xiangrenzhi <xiangrenzhi@huawei.com>
Date: Thu, 25 Feb 2021 09:42:04 +0800
Subject: [PATCH] docker: fix network sandbox not cleaned up on failure
Signed-off-by: xiangrenzhi <xiangrenzhi@huawei.com>
---
components/engine/daemon/container_operations.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/components/engine/daemon/container_operations.go b/components/engine/daemon/container_operations.go
index 909c7ccb2..39b52b037 100644
--- a/components/engine/daemon/container_operations.go
+++ b/components/engine/daemon/container_operations.go
@@ -498,7 +498,7 @@ func (daemon *Daemon) updateContainerNetworkSettings(container *container.Contai
}
}
-func (daemon *Daemon) allocateNetwork(container *container.Container) error {
+func (daemon *Daemon) allocateNetwork(container *container.Container) (retErr error) {
start := time.Now()
controller := daemon.netController
@@ -566,7 +566,7 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) error {
}
updateSandboxNetworkSettings(container, sb)
defer func() {
- if err != nil {
+ if retErr != nil {
sb.Delete()
}
}()
--
2.19.1

View File

@@ -0,0 +1,88 @@
From 8034f96d1500dac8af17449b9dba01b07b956a04 Mon Sep 17 00:00:00 2001
From: xiadanni <xiadanni1@huawei.com>
Date: Tue, 2 Mar 2021 09:31:44 +0800
Subject: [PATCH] docker: fix container status not consistent with its shim
process status
1. fix containerd-shim residual when kill containerd during start container
If containerd is killed after shim and container init process started,
new containerd process will not clean them during load-task.
But both of t.Start and t.Delete in docker failed because it cannot
connect to containerd. In the meanwhile, docker have not received container
start event yet, so it will not set container status to running.
All of above caused shim and container init process residual but
container status from docker is Created. Even after container is
deleted, shim and init process still exist.
So we add runc delete --force if t.Start failed, which do not need to
send signal through containerd to kill container process.
2. fix shim killed but container status is running
In the similar scene with 1, shim and container init process started,
and start event is sent to dockerd. But containerd is killed and new
containerd process is started before t.Delete, shim will be killed but
container init process is still working, dockerd will not receive
process exit event. So dockerd shows container is running but actually
shim is killed.
So we add runc delete --force if t.Start failed to kill container init
process.
Signed-off-by: xiadanni <xiadanni1@huawei.com>
---
components/engine/libcontainerd/client_daemon.go | 20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/components/engine/libcontainerd/client_daemon.go b/components/engine/libcontainerd/client_daemon.go
index 502796b..9c65e54 100755
--- a/components/engine/libcontainerd/client_daemon.go
+++ b/components/engine/libcontainerd/client_daemon.go
@@ -8,6 +8,7 @@ import (
"fmt"
"io"
"os"
+ "os/exec"
"path/filepath"
"reflect"
"runtime"
@@ -317,10 +318,9 @@ func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin
close(stdinCloseSync)
if err := t.Start(ctx); err != nil {
- if _, err := t.Delete(ctx); err != nil {
- c.logger.WithError(err).WithField("container", id).
- Error("failed to delete task after fail start")
- }
+ exec.Command("runc", "--root", "/var/run/docker/runtime-runc/moby", "delete", "--force", id).Run()
+ _, errD := t.Delete(ctx)
+ logrus.Warnf("container %v start failed, delete task, delete err: %v", id, errD)
ctr.setTask(nil)
return -1, wrapError(err)
}
@@ -916,10 +916,7 @@ func (c *client) processEventStream(ctx context.Context, ns string) {
c.logger.WithField("container", ei.ContainerID).Warn("unknown container")
if et == EventExit && ei.ProcessID == ei.ContainerID && c.backend.IsContainerRunning(ei.ContainerID) {
c.logger.WithField("container", ei.ContainerID).Warn("handle exit event force ...")
- c.eventQ.append(ei.ContainerID, func() {
- c.logger.WithField("container", ei.ContainerID).Warnf("handle exit event force: error=%v",
- c.backend.ProcessEvent(ei.ContainerID, et, ei))
- })
+ c.processOrphanEvent(ctr, et, ei)
}
continue
}
@@ -935,6 +932,13 @@ func (c *client) processEventStream(ctx context.Context, ns string) {
}
}
+func (c *client) processOrphanEvent(ctr *container, et EventType, ei EventInfo) {
+ c.eventQ.append(ei.ContainerID, func() {
+ c.logger.WithField("container", ei.ContainerID).Warnf("handle exit event force: error=%v",
+ c.backend.ProcessEvent(ei.ContainerID, et, ei))
+ })
+}
+
func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
if err != nil {
--
1.8.3.1

View File

@@ -0,0 +1,51 @@
From 06e9b3151585573818df8d890c0be1dc576500e6 Mon Sep 17 00:00:00 2001
From: jingrui <jingrui@huawei.com>
Date: Mon, 1 Feb 2021 16:56:40 +0800
Subject: [PATCH] docker: fix hijack hang
Change-Id: Ica0fe7806227114acfe028b44dfeed70a5dd4577
Signed-off-by: jingrui <jingrui@huawei.com>
---
.../docker/docker/client/container_exec.go | 18 ++++++++-
.../dockerd/hack/malformed_host_override.go | 37 +++++++++++--------
2 files changed, 38 insertions(+), 17 deletions(-)
diff --git a/components/cli/vendor/github.com/docker/docker/client/container_exec.go b/components/cli/vendor/github.com/docker/docker/client/container_exec.go
index 535536b1e0..ac458e9c30 100644
--- a/components/cli/vendor/github.com/docker/docker/client/container_exec.go
+++ b/components/cli/vendor/github.com/docker/docker/client/container_exec.go
@@ -3,6 +3,8 @@ package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/json"
+ "fmt"
+ "time"
"github.com/docker/docker/api/types"
)
@@ -36,8 +38,20 @@ func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config
// and the a reader to get output. It's up to the called to close
// the hijacked connection by calling types.HijackedResponse.Close.
func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) {
- headers := map[string][]string{"Content-Type": {"application/json"}}
- return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
+ done := make(chan struct{})
+ var resp types.HijackedResponse
+ var err error
+ go func() {
+ headers := map[string][]string{"Content-Type": {"application/json"}}
+ resp, err = cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
+ close(done)
+ }()
+ select {
+ case <-done:
+ return resp, err
+ case <-time.After(5 * time.Minute):
+ return resp, fmt.Errorf("post exec hijacked timeout")
+ }
}
// ContainerExecInspect returns information about a specific exec process on the docker host.
--
2.17.1

View File

@@ -0,0 +1,81 @@
From 74bd1d0c00c53f96696663e45507e332684dac7a Mon Sep 17 00:00:00 2001
From: xiadanni <xiadanni1@huawei.com>
Date: Wed, 3 Mar 2021 16:46:50 +0800
Subject: [PATCH] docker: fix docker kill command block
reason:When docker kill command execute with start/restart command
concurrently, kill command may block at <-container.Wait.
As s.waitStop is variable, so there is case that waitStop in Wait
function get a new s.waitStop(the old one is already closed before).
So kill command blocked to wait the new s.waitStop close.
Signed-off-by: xiadanni <xiadanni1@huawei.com>
---
components/engine/container/state.go | 13 +++++++++++--
components/engine/daemon/kill.go | 4 +++-
2 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/components/engine/container/state.go b/components/engine/container/state.go
index 91ea30a..e9666ed 100644
--- a/components/engine/container/state.go
+++ b/components/engine/container/state.go
@@ -65,6 +65,10 @@ func NewState() *State {
}
}
+func (s State) GetWaitStop() chan struct{} {
+ return s.waitStop
+}
+
// String returns a human-readable description of the state
func (s *State) String() string {
if s.Running {
@@ -179,6 +183,10 @@ const (
// otherwise, the results Err() method will return an error indicating why the
// wait operation failed.
func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateStatus {
+ return s.Wait3(ctx, condition, nil)
+}
+
+func (s *State) Wait3(ctx context.Context, condition WaitCondition, waitStop chan struct{}) <-chan StateStatus {
s.Lock()
defer s.Unlock()
@@ -197,9 +205,10 @@ func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateS
// If we are waiting only for removal, the waitStop channel should
// remain nil and block forever.
- var waitStop chan struct{}
if condition < WaitConditionRemoved {
- waitStop = s.waitStop
+ if waitStop == nil {
+ waitStop = s.waitStop
+ }
}
// Always wait for removal, just in case the container gets removed
diff --git a/components/engine/daemon/kill.go b/components/engine/daemon/kill.go
index d185065..4c8ccf9 100644
--- a/components/engine/daemon/kill.go
+++ b/components/engine/daemon/kill.go
@@ -132,6 +132,8 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
return nil
}
+ waitStop := container.GetWaitStop()
+
// 1. Send SIGKILL
if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil {
// While normally we might "return err" here we're not going to
@@ -166,7 +168,7 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
// Wait for exit with no timeout.
// Ignore returned status.
- <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning)
+ <-container.Wait3(context.Background(), containerpkg.WaitConditionNotRunning, waitStop)
return nil
}
--
1.8.3.1

View File

@@ -0,0 +1,82 @@
From 9ddd6e47a90ac056d242969ff72bf75a43cc0004 Mon Sep 17 00:00:00 2001
From: Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
Date: Thu, 29 Nov 2018 16:14:35 +0900
Subject: [PATCH] pkg/archive: [backport] fix TestTarUntarWithXattr failure on recent
kernel
Recent kernel has strict check for security.capability value.
Fix #38289
Signed-off-by: Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
---
Dockerfile | 1 +
pkg/archive/archive_unix_test.go | 20 ++++++++++++++------
2 files changed, 15 insertions(+), 6 deletions(-)
diff --git a/components/engine/Dockerfile b/components/engine/Dockerfile
index b0895cf5e0..8337653e19 100644
--- a/components/engine/Dockerfile
+++ b/components/engine/Dockerfile
@@ -182,6 +182,7 @@ RUN apt-get update && apt-get install -y \
btrfs-tools \
iptables \
jq \
+ libcap2-bin \
libdevmapper-dev \
libudev-dev \
libsystemd-dev \
diff --git a/components/engine/pkg/archive/archive_unix_test.go b/components/engine/pkg/archive/archive_unix_test.go
index 83deab0840..dc4e1fdae6 100644
--- a/components/engine/pkg/archive/archive_unix_test.go
+++ b/components/engine/pkg/archive/archive_unix_test.go
@@ -7,6 +7,7 @@ import (
"fmt"
"io/ioutil"
"os"
+ "os/exec"
"path/filepath"
"strings"
"syscall"
@@ -222,6 +223,13 @@ func TestTarWithBlockCharFifo(t *testing.T) {
// TestTarUntarWithXattr is Unix as Lsetxattr is not supported on Windows
func TestTarUntarWithXattr(t *testing.T) {
skip.If(t, os.Getuid() != 0, "skipping test that requires root")
+ if _, err := exec.LookPath("setcap"); err != nil {
+ t.Skip("setcap not installed")
+ }
+ if _, err := exec.LookPath("getcap"); err != nil {
+ t.Skip("getcap not installed")
+ }
+
origin, err := ioutil.TempDir("", "docker-test-untar-origin")
assert.NilError(t, err)
defer os.RemoveAll(origin)
@@ -232,8 +240,9 @@ func TestTarUntarWithXattr(t *testing.T) {
assert.NilError(t, err)
err = ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700)
assert.NilError(t, err)
- err = system.Lsetxattr(filepath.Join(origin, "2"), "security.capability", []byte{0x00}, 0)
- assert.NilError(t, err)
+ // there is no known Go implementation of setcap/getcap with support for v3 file capability
+ out, err := exec.Command("setcap", "cap_block_suspend+ep", filepath.Join(origin, "2")).CombinedOutput()
+ assert.NilError(t, err, string(out))
for _, c := range []Compression{
Uncompressed,
@@ -251,10 +260,9 @@ func TestTarUntarWithXattr(t *testing.T) {
if len(changes) != 1 || changes[0].Path != "/3" {
t.Fatalf("Unexpected differences after tarUntar: %v", changes)
}
- capability, _ := system.Lgetxattr(filepath.Join(origin, "2"), "security.capability")
- if capability == nil && capability[0] != 0x00 {
- t.Fatalf("Untar should have kept the 'security.capability' xattr.")
- }
+ out, err := exec.Command("getcap", filepath.Join(origin, "2")).CombinedOutput()
+ assert.NilError(t, err, string(out))
+ assert.Check(t, is.Contains(string(out), "= cap_block_suspend+ep"), "untar should have kept the 'security.capability' xattr")
}
}
--
2.27.0

View File

@@ -0,0 +1,148 @@
From f2656c9524e517878131556988548e28e092b9a9 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Mon, 7 Mar 2022 12:00:11 +0800
Subject: [PATCH] docker: fix unit testcase error
---
components/engine/client/hijack_test.go | 3 ++-
components/engine/daemon/daemon_unix_test.go | 10 +++++-----
.../daemon/graphdriver/quota/projectquota_test.go | 2 +-
components/engine/opts/hosts_test.go | 8 ++++----
components/engine/pkg/pidfile/pidfile.go | 2 +-
components/engine/registry/registry_mock_test.go | 2 +-
components/engine/registry/registry_test.go | 3 ++-
7 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/components/engine/client/hijack_test.go b/components/engine/client/hijack_test.go
index d71dc9ea..05e8ca71 100644
--- a/components/engine/client/hijack_test.go
+++ b/components/engine/client/hijack_test.go
@@ -72,7 +72,8 @@ func TestTLSCloseWriter(t *testing.T) {
}
}()
- ts.StartTLS()
+ // certificate file in golang has been deleted
+ ts.Start()
defer ts.Close()
serverURL, err := url.Parse(ts.URL)
diff --git a/components/engine/daemon/daemon_unix_test.go b/components/engine/daemon/daemon_unix_test.go
index d9bba54a..8493a4a1 100644
--- a/components/engine/daemon/daemon_unix_test.go
+++ b/components/engine/daemon/daemon_unix_test.go
@@ -270,27 +270,27 @@ func TestNetworkOptions(t *testing.T) {
func TestGetContainerMountId(t *testing.T) {
id := "56e143922c405419a38b23bfbccc92284f35525e3f2ad7011ea904501ccd1219"
- id1 := getContainerMountId("/var/lib/docker/aufs/mnt/" + id)
+ _, id1 := getContainerMountId("/var/lib/docker/aufs/mnt/" + id)
if id1 != id {
t.Fatalf("Expected container mount id [%s], but got [%s]", id, id1)
}
- id1 = getContainerMountId("/var/lib/docker/devicemapper/mnt/" + id)
+ _, id1 = getContainerMountId("/var/lib/docker/devicemapper/mnt/" + id)
if id1 != id {
t.Fatalf("Expected container mount id [%s], but got [%s]", id, id1)
}
- id1 = getContainerMountId("/var/lib/docker/overlay/" + id + "/merged")
+ _, id1 = getContainerMountId("/var/lib/docker/overlay/" + id + "/merged")
if id1 != id {
t.Fatalf("Expected container mount id [%s], but got [%s]", id, id1)
}
- id1 = getContainerMountId("/var/lib/docker/zfs/graph/" + id)
+ _, id1 = getContainerMountId("/var/lib/docker/zfs/graph/" + id)
if id1 != id {
t.Fatalf("Expected container mount id [%s], but got [%s]", id, id1)
}
- id1 = getContainerMountId("/var/lib/docker/devicemapper_err/mnt" + id)
+ _, id1 = getContainerMountId("/var/lib/docker/devicemapper_err/mnt" + id)
if id1 != "" {
t.Fatalf("Expected a empty container mount id, but got [%s]", id1)
}
diff --git a/components/engine/daemon/graphdriver/quota/projectquota_test.go b/components/engine/daemon/graphdriver/quota/projectquota_test.go
index aa164cc4..1a5ac693 100644
--- a/components/engine/daemon/graphdriver/quota/projectquota_test.go
+++ b/components/engine/daemon/graphdriver/quota/projectquota_test.go
@@ -111,7 +111,7 @@ func wrapQuotaTest(testFunc func(t *testing.T, ctrl *Control, mountPoint, testDi
assert.NilError(t, err)
defer os.RemoveAll(testDir)
- ctrl, err := NewControl(testDir)
+ ctrl, err := NewControl(testDir, "xfs")
assert.NilError(t, err)
testSubDir, err := ioutil.TempDir(testDir, "quota-test")
diff --git a/components/engine/opts/hosts_test.go b/components/engine/opts/hosts_test.go
index cd8c3f91..fbe4b3cc 100644
--- a/components/engine/opts/hosts_test.go
+++ b/components/engine/opts/hosts_test.go
@@ -53,8 +53,8 @@ func TestParseHost(t *testing.T) {
func TestParseDockerDaemonHost(t *testing.T) {
invalids := map[string]string{
- "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d",
- "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path",
+ "tcp:a.b.c.d": `parse tcp://tcp:a.b.c.d: invalid port ":a.b.c.d" after host`,
+ "tcp:a.b.c.d/path": `parse tcp://tcp:a.b.c.d/path: invalid port ":a.b.c.d" after host`,
"udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1",
"udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375",
"tcp://unix:///run/docker.sock": "Invalid proto, expected tcp: unix:///run/docker.sock",
@@ -99,8 +99,8 @@ func TestParseTCP(t *testing.T) {
defaultHTTPHost = "tcp://127.0.0.1:2376"
)
invalids := map[string]string{
- "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d",
- "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path",
+ "tcp:a.b.c.d": `parse tcp://tcp:a.b.c.d: invalid port ":a.b.c.d" after host`,
+ "tcp:a.b.c.d/path": `parse tcp://tcp:a.b.c.d/path: invalid port ":a.b.c.d" after host`,
"udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1",
"udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375",
}
diff --git a/components/engine/pkg/pidfile/pidfile.go b/components/engine/pkg/pidfile/pidfile.go
index 485c0013..ab7484a3 100644
--- a/components/engine/pkg/pidfile/pidfile.go
+++ b/components/engine/pkg/pidfile/pidfile.go
@@ -33,7 +33,7 @@ func isSameApplication(pid int) (bool, error) {
for sc.Scan() {
lens := strings.Split(sc.Text(), ":")
if len(lens) == 2 && strings.TrimSpace(lens[0]) == "Name" {
- if strings.TrimSpace(lens[1]) == os.Args[0] {
+ if _, filename := filepath.Split(os.Args[0]); strings.TrimSpace(lens[1]) == strings.TrimSpace(filename) || strings.TrimSpace(lens[1]) == os.Args[0] {
return true, nil
}
return false, nil
diff --git a/components/engine/registry/registry_mock_test.go b/components/engine/registry/registry_mock_test.go
index bf17eb9f..b80aed15 100644
--- a/components/engine/registry/registry_mock_test.go
+++ b/components/engine/registry/registry_mock_test.go
@@ -112,7 +112,7 @@ func init() {
r.HandleFunc("/v2/version", handlerGetPing).Methods("GET")
testHTTPServer = httptest.NewServer(handlerAccessLog(r))
- testHTTPSServer = httptest.NewTLSServer(handlerAccessLog(r))
+ testHTTPSServer = httptest.NewServer(handlerAccessLog(r))
// override net.LookupIP
lookupIP = func(host string) ([]net.IP, error) {
diff --git a/components/engine/registry/registry_test.go b/components/engine/registry/registry_test.go
index b7459471..f909685e 100644
--- a/components/engine/registry/registry_test.go
+++ b/components/engine/registry/registry_test.go
@@ -75,7 +75,8 @@ func TestPingRegistryEndpoint(t *testing.T) {
}
func TestEndpoint(t *testing.T) {
- skip.If(t, os.Getuid() != 0, "skipping test that requires root")
+ // certificate file in golang has been deleted
+ skip.If(t, os.Getuid() == 0, "skipping test that requires root")
// Simple wrapper to fail test if err != nil
expandEndpoint := func(index *registrytypes.IndexInfo) *V1Endpoint {
endpoint, err := NewV1Endpoint(index, "", nil)
--
2.27.0

View File

@@ -0,0 +1,44 @@
From 8b41a404dcb0aa7c377b18b5f0627ed379371245 Mon Sep 17 00:00:00 2001
From: jingrui <jingrui@huawei.com>
Date: Thu, 18 Mar 2021 17:28:20 +0800
Subject: [PATCH] docker: use info level for create/start/stop command
Signed-off-by: jingrui <jingrui@huawei.com>
---
.../engine/api/server/middleware/debug.go | 17 ++++++++++++++++-
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/components/engine/api/server/middleware/debug.go b/components/engine/api/server/middleware/debug.go
index 31165bf91..2c039aa5d 100644
--- a/components/engine/api/server/middleware/debug.go
+++ b/components/engine/api/server/middleware/debug.go
@@ -13,10 +13,25 @@ import (
"github.com/sirupsen/logrus"
)
+func isKeyCmd(method string, uri string) bool {
+ if method != "POST" {
+ return false
+ }
+ if !strings.Contains(uri, "containers") {
+ return false
+ }
+ return strings.Contains(uri, "create") || strings.Contains(uri, "start") || strings.Contains(uri, "stop") || strings.Contains(uri, "kill")
+}
+
// DebugRequestMiddleware dumps the request to logger
func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
- logrus.Debugf("Calling %s %s", r.Method, r.RequestURI)
+ if isKeyCmd(r.Method, r.RequestURI) {
+ agent, _ := r.Header["User-Agent"]
+ logrus.Infof("Calling %s %s agent=%v", r.Method, r.RequestURI, agent)
+ } else {
+ logrus.Debugf("Calling %s %s", r.Method, r.RequestURI)
+ }
if r.Method != "POST" {
return handler(ctx, w, r, vars)
--
2.23.0

View File

@@ -0,0 +1,56 @@
From fa960e384ada593add8e14c4cbc4da5a4ebf095e Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Fri, 16 Apr 2021 19:49:45 +0800
Subject: [PATCH] docker: [backport] Fix for lack of synchronization in daemon/update.go
Conflict:NA
Reference:https://github.com/moby/moby/pull/41999/commits/58825ffc3243f13795b36f430726ae8e3e14bed0
---
components/engine/daemon/update.go | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/components/engine/daemon/update.go b/components/engine/daemon/update.go
index 0ebb139d3..b38db991b 100644
--- a/components/engine/daemon/update.go
+++ b/components/engine/daemon/update.go
@@ -42,20 +42,25 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro
restoreConfig := false
backupHostConfig := *container.HostConfig
+
defer func() {
if restoreConfig {
container.Lock()
- container.HostConfig = &backupHostConfig
- container.CheckpointTo(daemon.containersReplica)
+ if !container.RemovalInProgress && !container.Dead {
+ container.HostConfig = &backupHostConfig
+ container.CheckpointTo(daemon.containersReplica)
+ }
container.Unlock()
}
}()
+ container.Lock()
+
if container.RemovalInProgress || container.Dead {
+ container.Unlock()
return errCannotUpdate(container.ID, fmt.Errorf("container is marked for removal and cannot be \"update\""))
}
- container.Lock()
if err := container.UpdateContainer(hostConfig); err != nil {
restoreConfig = true
container.Unlock()
@@ -66,6 +71,7 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro
container.Unlock()
return errCannotUpdate(container.ID, err)
}
+
container.Unlock()
// if Restart Policy changed, we need to update container monitor
--
2.27.0

View File

@@ -0,0 +1,87 @@
From f29dda9acd7a071ab2e4a86f820be236a23838f0 Mon Sep 17 00:00:00 2001
From: Miloslav Trmač <mitr@redhat.com>
Date: Thu, 6 Sep 2018 23:24:06 +0200
Subject: [PATCH] docker: [backport] Don't fail on two concurrent reference.store.AddDigest calls
reference.store.addReference fails when adding a digest reference
that already exists (regardless of the reference target). Both
callers (via reference.store.AddDigest) do check in advance, using
reference.store.Get, whether the digest reference exists before
calling AddDigest, but the reference store lock is released between
the two calls, so if another thread sets the reference in the meantime,
AddDigest may fail with
> Cannot overwrite digest ...
.
Handle this by checking that the pre-existing reference points at the
same image, i.e. that there is nothing to do, and succeeding immediately
in that case. This is even cheaper, avoids a reference.store.save() call.
(In principle, the same failure could have happened via
reference.store.AddTag, as
> Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option
but almost all callers (except for migrate/v1.Migrate, which is run
single-threaded anyway) set the "force" parameter of AddTag to true,
which makes the race invisible. This commit does not change the behavior
of that case, except for speeding it up by avoiding the
reference.store.save() call.)
The existing reference.store.Get checks are now, in a sense, redundant
as such, but their existence allows the callers to provide nice
context-dependent error messages, so this commit leaves them unchanged.
Signed-off-by: Miloslav Trmač <mitr@redhat.com>
Conflict:NA
Reference:https://github.com/moby/moby/commit/f29dda9acd7a071ab2e4a86f820be236a23838f0
---
components/engine/reference/store.go | 5 +++++
components/engine/reference/store_test.go | 8 ++++++++
2 files changed, 13 insertions(+)
diff --git a/components/engine/reference/store.go b/components/engine/reference/store.go
index b01051bf58..b942c42ca2 100644
--- a/components/engine/reference/store.go
+++ b/components/engine/reference/store.go
@@ -149,6 +149,11 @@ func (store *store) addReference(ref reference.Named, id digest.Digest, force bo
oldID, exists := repository[refStr]
if exists {
+ if oldID == id {
+ // Nothing to do. The caller may have checked for this using store.Get in advance, but store.mu was unlocked in the meantime, so this can legitimately happen nevertheless.
+ return nil
+ }
+
// force only works for tags
if digested, isDigest := ref.(reference.Canonical); isDigest {
return errors.WithStack(conflictingTagError("Cannot overwrite digest " + digested.Digest().String()))
diff --git a/components/engine/reference/store_test.go b/components/engine/reference/store_test.go
index 1ce674cbfb..435409d358 100644
--- a/components/engine/reference/store_test.go
+++ b/components/engine/reference/store_test.go
@@ -163,6 +163,10 @@ func TestAddDeleteGet(t *testing.T) {
if err = store.AddTag(ref4, testImageID2, false); err != nil {
t.Fatalf("error adding to store: %v", err)
}
+ // Write the same values again; should silently succeed
+ if err = store.AddTag(ref4, testImageID2, false); err != nil {
+ t.Fatalf("error redundantly adding to store: %v", err)
+ }
ref5, err := reference.ParseNormalizedNamed("username/repo3@sha256:58153dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c")
if err != nil {
@@ -171,6 +175,10 @@ func TestAddDeleteGet(t *testing.T) {
if err = store.AddDigest(ref5.(reference.Canonical), testImageID2, false); err != nil {
t.Fatalf("error adding to store: %v", err)
}
+ // Write the same values again; should silently succeed
+ if err = store.AddDigest(ref5.(reference.Canonical), testImageID2, false); err != nil {
+ t.Fatalf("error redundantly adding to store: %v", err)
+ }
// Attempt to overwrite with force == false
if err = store.AddTag(ref4, testImageID3, false); err == nil || !strings.HasPrefix(err.Error(), "Conflict:") {
--
2.27.0

View File

@ -0,0 +1,40 @@
From 57bbb50663f80e78cbdb5283b28be19b64f14ea9 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Thu, 13 May 2021 11:15:40 +0800
Subject: [PATCH] docker: [backport] Unexport testcase.Cleanup to fix Go 1.14
Conflict:NA
Reference:https://github.com/gotestyourself/gotest.tools/pull/169/commits/6bc35c2eea35a967a8fe3cf05f491da2cc1793d0
---
components/engine/vendor/gotest.tools/x/subtest/context.go | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/components/engine/vendor/gotest.tools/x/subtest/context.go b/components/engine/vendor/gotest.tools/x/subtest/context.go
index 878bdebf1..bcf13eed5 100644
--- a/components/engine/vendor/gotest.tools/x/subtest/context.go
+++ b/components/engine/vendor/gotest.tools/x/subtest/context.go
@@ -27,9 +27,9 @@ func (tc *testcase) Ctx() context.Context {
return tc.ctx
}
-// Cleanup runs all cleanup functions. Functions are run in the opposite order
+// cleanup runs all cleanup functions. Functions are run in the opposite order
// in which they were added. Cleanup is called automatically before Run exits.
-func (tc *testcase) Cleanup() {
+func (tc *testcase) cleanup() {
for _, f := range tc.cleanupFuncs {
// Defer all cleanup functions so they all run even if one calls
// t.FailNow() or panics. Deferring them also runs them in reverse order.
@@ -59,7 +59,7 @@ type parallel interface {
func Run(t *testing.T, name string, subtest func(t TestContext)) bool {
return t.Run(name, func(t *testing.T) {
tc := &testcase{TB: t}
- defer tc.Cleanup()
+ defer tc.cleanup()
subtest(tc)
})
}
--
2.27.0

View File

@ -0,0 +1,79 @@
From 782d36eae49ceff3e4fbd43c5a8112d9958dc791 Mon Sep 17 00:00:00 2001
From: Stephen Benjamin <stephen@redhat.com>
Date: Tue, 3 Sep 2019 10:56:45 -0400
Subject: [PATCH] archive: [backport] fix race condition in cmdStream
There is a race condition in pkg/archive when using `cmd.Start` for pigz
and xz where the `*bufio.Reader` could be returned to the pool while the
command is still writing to it, and then picked up and used by a new
command.
The command is wrapped in a `CommandContext` where the process will be
killed when the context is cancelled, however this is not instantaneous,
so there's a brief window while the command is still running but the
`*bufio.Reader` was already returned to the pool.
wrapReadCloser calls `cancel()`, and then `readBuf.Close()` which
eventually returns the buffer to the pool. However, because cmdStream
runs `cmd.Wait` in a go routine that we never wait for to finish, it is
not safe to return the reader to the pool yet. We need to ensure we
wait for `cmd.Wait` to finish!
Signed-off-by: Stephen Benjamin <stephen@redhat.com>
(cherry picked from commit 89dd10b06efe93d4f427057f043abf560c461281)
Signed-off-by: WangFengTu <wangfengtu@huawei.com>
---
components/engine/pkg/archive/archive.go | 12 +++++++++++-
components/engine/pkg/archive/archive_test.go | 4 +++-
2 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/components/engine/pkg/archive/archive.go b/components/engine/pkg/archive/archive.go
index 070dccb756..82cd0a6c6f 100644
--- a/components/engine/pkg/archive/archive.go
+++ b/components/engine/pkg/archive/archive.go
@@ -1216,6 +1216,9 @@ func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
return nil, err
}
+ // Ensure the command has exited before we clean anything up
+ done := make(chan struct{})
+
// Copy stdout to the returned pipe
go func() {
if err := cmd.Wait(); err != nil {
@@ -1223,9 +1226,16 @@ func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
} else {
pipeW.Close()
}
+ close(done)
}()
- return pipeR, nil
+ return ioutils.NewReadCloserWrapper(pipeR, func() error {
+ // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as
+ // cmd.Wait waits for any non-file stdout/stderr/stdin to close.
+ err := pipeR.Close()
+ <-done
+ return err
+ }), nil
}
// NewTempArchive reads the content of src into a temporary file, and returns the contents
diff --git a/components/engine/pkg/archive/archive_test.go b/components/engine/pkg/archive/archive_test.go
index b448bac49a..f77b7c202d 100644
--- a/components/engine/pkg/archive/archive_test.go
+++ b/components/engine/pkg/archive/archive_test.go
@@ -1356,7 +1356,9 @@ func TestPigz(t *testing.T) {
_, err := exec.LookPath("unpigz")
if err == nil {
t.Log("Tested whether Pigz is used, as it installed")
- assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{}))
+ // For the command wait wrapper
+ cmdWaitCloserWrapper := contextReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper)
+ assert.Equal(t, reflect.TypeOf(cmdWaitCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{}))
} else {
t.Log("Tested whether Pigz is not used, as it not installed")
assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{}))
--
2.27.0

View File

@ -0,0 +1,64 @@
From 20b8dbbf705988f94d16a401e9d4f510387cbd0d Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Mon, 7 Jun 2021 11:23:33 +0800
Subject: [PATCH] docker: fix runc data and dm left when periodically kill
containerd
---
components/engine/daemon/start.go | 20 ++++++++++++++++++--
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/components/engine/daemon/start.go b/components/engine/daemon/start.go
index 07bffaa27..7a7e2b2ee 100644
--- a/components/engine/daemon/start.go
+++ b/components/engine/daemon/start.go
@@ -2,6 +2,7 @@ package daemon // import "github.com/docker/docker/daemon"
import (
"context"
+ "os/exec"
"runtime"
"time"
@@ -14,6 +15,12 @@ import (
"github.com/sirupsen/logrus"
)
+const RootDirectory = "/var/run/docker/runtime-runc/moby"
+
+func deleteForce(containerID string) error {
+ return exec.Command("runc", "--root", RootDirectory, "delete", "--force", containerID).Run()
+}
+
// ContainerStart starts a container.
func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error {
if checkpoint != "" && !daemon.HasExperimental() {
@@ -210,7 +217,11 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint
if err != nil {
if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil {
logrus.WithError(err).WithField("container", container.ID).
- Error("failed to delete failed start container")
+ Error("failed to delete failed start container, try to delete directly")
+ err := deleteForce(container.ID)
+ if err != nil {
+ logrus.Errorf("failed to directly delete container %s", container.ID)
+ }
}
return translateContainerdStartErr(container.Path, container.SetExitCode, err)
}
@@ -273,6 +284,11 @@ func (daemon *Daemon) Cleanup(container *container.Container) {
container.CancelAttachContext()
if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil {
- logrus.Errorf("%s cleanup: failed to delete container from containerd: %v", container.ID, err)
+ logrus.Errorf("%s cleanup: failed to delete container from containerd, try to delete directly: %v", container.ID, err)
+
+ err := deleteForce(container.ID)
+ if err != nil {
+ logrus.Errorf("%s cleanup: failed to directly delete container", container.ID)
+ }
}
}
--
2.27.0

View File

@ -0,0 +1,82 @@
From 210d1acba11aee0cb4a543fa97feb9ecfc4ba532 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Tue, 15 Jun 2021 20:51:10 +0800
Subject: [PATCH] docker: fix ProcessEvent block when CloseStreams block
The ProcessEvent function will block if the CloseStreams function blocks during
exit event processing. The reason is that ProcessEvent handles events serially.
So we need to add a timeout mechanism to deal with it.
---
components/engine/container/stream/streams.go | 42 ++++++++++++-------
1 file changed, 27 insertions(+), 15 deletions(-)
diff --git a/components/engine/container/stream/streams.go b/components/engine/container/stream/streams.go
index 585f9e8e3..1a7ef33d4 100644
--- a/components/engine/container/stream/streams.go
+++ b/components/engine/container/stream/streams.go
@@ -7,6 +7,7 @@ import (
"io/ioutil"
"strings"
"sync"
+ "time"
"github.com/containerd/containerd/cio"
"github.com/docker/docker/pkg/broadcaster"
@@ -92,27 +93,38 @@ func (c *Config) NewNopInputPipe() {
// CloseStreams ensures that the configured streams are properly closed.
func (c *Config) CloseStreams() error {
- var errors []string
+ done := make(chan struct{})
+ var errorsInLine error
- if c.stdin != nil {
- if err := c.stdin.Close(); err != nil {
- errors = append(errors, fmt.Sprintf("error close stdin: %s", err))
+ go func() {
+ var errors []string
+ if c.stdin != nil {
+ if err := c.stdin.Close(); err != nil {
+ errors = append(errors, fmt.Sprintf("error close stdin: %s", err))
+ }
}
- }
- if err := c.stdout.Clean(); err != nil {
- errors = append(errors, fmt.Sprintf("error close stdout: %s", err))
- }
+ if err := c.stdout.Clean(); err != nil {
+ errors = append(errors, fmt.Sprintf("error close stdout: %s", err))
+ }
- if err := c.stderr.Clean(); err != nil {
- errors = append(errors, fmt.Sprintf("error close stderr: %s", err))
- }
+ if err := c.stderr.Clean(); err != nil {
+ errors = append(errors, fmt.Sprintf("error close stderr: %s", err))
+ }
- if len(errors) > 0 {
- return fmt.Errorf(strings.Join(errors, "\n"))
- }
+ if len(errors) > 0 {
+ errorsInLine = fmt.Errorf(strings.Join(errors, "\n"))
+ }
+
+ close(done)
+ }()
- return nil
+ select {
+ case <-done:
+ return errorsInLine
+ case <-time.After(3 * time.Second):
+ return fmt.Errorf("close stream timeout")
+ }
}
// CopyToPipe connects streamconfig with a libcontainerd.IOPipe
--
2.27.0

View File

@ -0,0 +1,90 @@
From c79f7bc343ebb9b855e7a28282d8c9ebcaf7e63c Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Thu, 5 Aug 2021 15:12:14 +0800
Subject: [PATCH] docker: check db file size before start containerd
if the db file's metadata is damaged, the db will fail to load
with error "file size too small" when starting. We need to check it
before starting containerd.
---
components/engine/cmd/dockerd/daemon.go | 45 +++++++++++++------------
1 file changed, 24 insertions(+), 21 deletions(-)
diff --git a/components/engine/cmd/dockerd/daemon.go b/components/engine/cmd/dockerd/daemon.go
index 04bc06b92..a96c9d98b 100644
--- a/components/engine/cmd/dockerd/daemon.go
+++ b/components/engine/cmd/dockerd/daemon.go
@@ -113,28 +113,29 @@ func resumeDM() {
}
}
-func cleanupLocalDB(db string) {
- _, err := os.Stat(db)
- if err == nil {
- err = os.Remove(db)
- logrus.Infof("cleanup DB %s error=%v", db, err)
+func cleanupLocalDB(db string, checkSize bool) {
+ if info, err := os.Stat(db); err == nil {
+ if checkSize == false || int(info.Size()) < 2*os.Getpagesize() {
+ err = os.Remove(db)
+ logrus.Infof("cleanup DB %s error=%v", db, err)
+ }
}
}
// DB files may corrupted on exception poweroff but can be rebuild at run time,
// so we can remove DB files on OS starts avoid daemon can not startup.
func cleanupLocalDBs(run, root string) {
+ checkSize := true
+
// check db lock is exist, do nothing if file is existed
dbLockPath := filepath.Join(run, "dblock")
- _, err := os.Stat(dbLockPath)
- if err == nil {
- return
- }
- if !os.IsNotExist(err) {
- logrus.Errorf("stat dblock failed %v", err)
- return
+ _, statErr := os.Stat(dbLockPath)
+ if os.IsNotExist(statErr) {
+ checkSize = false
+ logrus.Errorf("stat dblock failed %v", statErr)
+ logrus.Devour(ioutil.WriteFile(dbLockPath, []byte{}, 0600))
}
- logrus.Devour(ioutil.WriteFile(dbLockPath, []byte{}, 0600))
+
files, err := ioutil.ReadDir(filepath.Join(run, "containerd"))
logrus.Devour(err)
olds, err := ioutil.ReadDir(filepath.Join(run, "libcontainerd"))
@@ -145,17 +146,19 @@ func cleanupLocalDBs(run, root string) {
return
}
}
+
if os.Getenv("DISABLE_CRASH_FILES_DELETE") == "true" {
return
}
- cleanupLocalDB(filepath.Join(root, "containerd/daemon/io.containerd.metadata.v1.bolt/meta.db"))
- cleanupLocalDB(filepath.Join(root, "builder/fscache.db"))
- cleanupLocalDB(filepath.Join(root, "volumes/metadata.db"))
- cleanupLocalDB(filepath.Join(root, "network/files/local-kv.db"))
- cleanupLocalDB(filepath.Join(root, "accelerator/accel.db"))
- cleanupLocalDB(filepath.Join(root, "buildkit/metadata.db"))
- cleanupLocalDB(filepath.Join(root, "buildkit/cache.db"))
- cleanupLocalDB(filepath.Join(root, "buildkit/snapshots.db"))
+
+ cleanupLocalDB(filepath.Join(root, "containerd/daemon/io.containerd.metadata.v1.bolt/meta.db"), checkSize)
+ cleanupLocalDB(filepath.Join(root, "builder/fscache.db"), checkSize)
+ cleanupLocalDB(filepath.Join(root, "volumes/metadata.db"), checkSize)
+ cleanupLocalDB(filepath.Join(root, "network/files/local-kv.db"), checkSize)
+ cleanupLocalDB(filepath.Join(root, "accelerator/accel.db"), checkSize)
+ cleanupLocalDB(filepath.Join(root, "buildkit/metadata.db"), checkSize)
+ cleanupLocalDB(filepath.Join(root, "buildkit/cache.db"), checkSize)
+ cleanupLocalDB(filepath.Join(root, "buildkit/snapshots.db"), checkSize)
}
func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
--
2.27.0

View File

@ -0,0 +1,25 @@
From 372bbea9041ab101156c881232d83d3e3124fd25 Mon Sep 17 00:00:00 2001
From: WangFengTu <wangfengtu@huawei.com>
Date: Sun, 29 Aug 2021 15:49:03 +0800
Subject: [PATCH] fix dangling unpigz
Signed-off-by: WangFengTu <wangfengtu@huawei.com>
---
components/engine/builder/dockerfile/copy.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/components/engine/builder/dockerfile/copy.go b/components/engine/builder/dockerfile/copy.go
index ad9b08dfe..c323e7033 100644
--- a/components/engine/builder/dockerfile/copy.go
+++ b/components/engine/builder/dockerfile/copy.go
@@ -527,6 +527,7 @@ func isArchivePath(driver containerfs.ContainerFS, path string) bool {
if err != nil {
return false
}
+ defer rdr.Close()
r := tar.NewReader(rdr)
_, err = r.Next()
return err == nil
--
2.23.0

View File

@ -0,0 +1,86 @@
From 0ebaeb1830b42642ae78920afafcadc381053a1e Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Mon, 30 Aug 2021 20:44:36 +0800
Subject: [PATCH] docker:add timeout for IO.Wait
---
.../containerd/containerd/process.go | 40 +++++++++++++------
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/components/engine/vendor/github.com/containerd/containerd/process.go b/components/engine/vendor/github.com/containerd/containerd/process.go
index 4d0dca9f7..a2aaa424b 100644
--- a/components/engine/vendor/github.com/containerd/containerd/process.go
+++ b/components/engine/vendor/github.com/containerd/containerd/process.go
@@ -18,6 +18,7 @@ package containerd
import (
"context"
+ "fmt"
"strings"
"syscall"
"time"
@@ -105,6 +106,21 @@ func (p *process) Pid() uint32 {
return p.pid
}
+func waitTimeout(io cio.IO, timeout time.Duration) error {
+ done := make(chan struct{})
+ go func() {
+ io.Wait()
+ close(done)
+ }()
+
+ select {
+ case <-done:
+ return nil
+ case <-time.After(timeout):
+ return fmt.Errorf("Wait IO timeout")
+ }
+}
+
// Start starts the exec process
func (p *process) Start(ctx context.Context) error {
r, err := p.task.client.TaskService().Start(ctx, &tasks.StartRequest{
@@ -112,19 +128,14 @@ func (p *process) Start(ctx context.Context) error {
ExecID: p.id,
})
if err != nil {
- done := make(chan struct{})
- go func() {
- p.io.Cancel()
- p.io.Wait()
- p.io.Close()
- close(done)
- }()
- select {
- case <-time.After(30 * time.Second):
+ p.io.Cancel()
+
+ errWait := waitTimeout(p.io, 30*time.Second)
+ if errWait != nil {
logrus.Warnf("process start failed with error %v, wait io close timeout, some fifo io may be dropped.", err)
- case <-done:
- // ok
}
+ p.io.Close()
+
return errdefs.FromGRPC(err)
}
p.pid = r.Pid
@@ -221,7 +232,12 @@ func (p *process) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitS
}
if p.io != nil {
p.io.Cancel()
- p.io.Wait()
+
+ err := waitTimeout(p.io, 3*time.Second)
+ if err != nil {
+ logrus.Warnf("Wait io close timeout, some fifo io may be dropped.")
+ }
+
p.io.Close()
}
return &ExitStatus{code: r.ExitStatus, exitedAt: r.ExitedAt}, nil
--
2.27.0

View File

@ -0,0 +1,38 @@
From aa1e1d6caf6983e6242a13b4cf98497161a7abb5 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Sat, 11 Sep 2021 11:45:53 +0800
Subject: [PATCH] docker:fix time Ticker leak
Tick's Ticker cannot be recovered by the garbage collector, it will
leak and cause CPU usage high in this case. We should replace it with
NewTicker and explicitly Stop it.
---
components/engine/daemon/freezer/freezer.go | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/components/engine/daemon/freezer/freezer.go b/components/engine/daemon/freezer/freezer.go
index 907c7aac2..6df176f2f 100644
--- a/components/engine/daemon/freezer/freezer.go
+++ b/components/engine/daemon/freezer/freezer.go
@@ -184,7 +184,8 @@ func (f *freezer) updateCgroup(state string) error {
curState = strings.TrimSpace(curState)
timeout := time.After(30 * time.Second)
- tick := time.Tick(1 * time.Millisecond)
+ ticker := time.NewTicker(1 * time.Millisecond)
+ defer ticker.Stop()
for {
select {
case <-timeout:
@@ -192,7 +193,7 @@ func (f *freezer) updateCgroup(state string) error {
return fmt.Errorf("cannot write %s to freezer for %#v", curState, err)
}
return fmt.Errorf("update freezer cgroup timeout for 30s")
- case <-tick:
+ case <-ticker.C:
// In case this loop does not exit because it doesn't get the expected
// state, let's write again this state, hoping it's going to be properly
// set this time. Otherwise, this loop could run infinitely, waiting for
--
2.27.0

View File

@ -0,0 +1,66 @@
From 1cbe2e6c0865f11fa264c24378bb0180cce6d414 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Wed, 22 Sep 2021 16:09:44 +0800
Subject: [PATCH] docker:fix bug where failed kills didnt fallback to unix kill
if killPossiblyDeadProcess fails, we expect killProcessDirectly to be executed to
kill the process directly. But container.Wait returns an err when the timeout deadline
is exceeded, and killProcessDirectly is not executed. Then docker stop will
hang.
---
components/engine/daemon/kill.go | 14 +++++++++-----
components/engine/daemon/stop.go | 6 ++++--
2 files changed, 13 insertions(+), 7 deletions(-)
diff --git a/components/engine/daemon/kill.go b/components/engine/daemon/kill.go
index 4c8ccf93d..593275cf8 100644
--- a/components/engine/daemon/kill.go
+++ b/components/engine/daemon/kill.go
@@ -153,8 +153,8 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
- if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil {
- return err
+ if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() == nil {
+ return nil
}
}
@@ -166,9 +166,13 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
return err
}
- // Wait for exit with no timeout.
- // Ignore returned status.
- <-container.Wait3(context.Background(), containerpkg.WaitConditionNotRunning, waitStop)
+ // wait for container to exit one last time, if it doesn't then kill didnt work, so return error
+ ctx2, cancel2 := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel2()
+
+ if status := <-container.Wait3(ctx2, containerpkg.WaitConditionNotRunning, waitStop); status.Err() != nil {
+ return errors.New("tried to kill container, but did not receive an exit event")
+ }
return nil
}
diff --git a/components/engine/daemon/stop.go b/components/engine/daemon/stop.go
index 40bc36dfd..741f5d5dd 100644
--- a/components/engine/daemon/stop.go
+++ b/components/engine/daemon/stop.go
@@ -82,8 +82,10 @@ func (daemon *Daemon) containerStop(container *containerpkg.Container, seconds i
logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal)
// 3. If it doesn't, then send SIGKILL
if err := daemon.Kill(container); err != nil {
- // Wait without a timeout, ignore result.
- <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning)
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+
+ <-container.Wait(ctx, containerpkg.WaitConditionNotRunning)
logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it
}
}
--
2.27.0

View File

@ -0,0 +1,31 @@
From dd4eb547134482edc9d3248870480c3f24cab655 Mon Sep 17 00:00:00 2001
From: WangFengTu <wangfengtu@huawei.com>
Date: Mon, 18 Oct 2021 16:14:15 +0800
Subject: [PATCH] do not check result of issueDiscard
If the device does not exist, issueDiscard will fail.
We expect deleteDevice to succeed even if the device does not exist.
Signed-off-by: WangFengTu <wangfengtu@huawei.com>
---
components/engine/daemon/graphdriver/devmapper/deviceset.go | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/components/engine/daemon/graphdriver/devmapper/deviceset.go b/components/engine/daemon/graphdriver/devmapper/deviceset.go
index 9b6cb0212..caa0a64cc 100644
--- a/components/engine/daemon/graphdriver/devmapper/deviceset.go
+++ b/components/engine/daemon/graphdriver/devmapper/deviceset.go
@@ -2078,9 +2078,7 @@ func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
defer devices.closeTransaction()
if devices.doBlkDiscard {
- if err := devices.issueDiscard(info); err != nil {
- return err
- }
+ devices.issueDiscard(info)
}
// Try to deactivate device in case it is active.
--
2.27.0

View File

@ -0,0 +1,69 @@
From deb30c8d68ff1199b4cbe4822fc8336ff65f6e1f Mon Sep 17 00:00:00 2001
From: WangFengTu <wangfengtu@huawei.com>
Date: Wed, 3 Nov 2021 13:34:53 +0800
Subject: [PATCH] add info log for pulling image
Signed-off-by: WangFengTu <wangfengtu@huawei.com>
---
.../api/server/router/image/image_routes.go | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/components/engine/api/server/router/image/image_routes.go b/components/engine/api/server/router/image/image_routes.go
index b7bb340e9..2c14945d2 100644
--- a/components/engine/api/server/router/image/image_routes.go
+++ b/components/engine/api/server/router/image/image_routes.go
@@ -20,12 +20,14 @@ import (
"github.com/docker/docker/registry"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
)
// Creates an image from Pull or from Import
func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
+ logrus.Errorf("parse image create http request failed: %v", err)
return err
}
@@ -37,16 +39,26 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
err error
output = ioutils.NewWriteFlusher(w)
platform *specs.Platform
+ sp specs.Platform
)
defer output.Close()
+ logrus.Infof("received image create request, name:%v:%v repo:%v", image, tag, repo)
+ defer func() {
+ if err != nil {
+ logrus.Errorf("image create request process failed, name:%v:%v repo:%v error: %v", image, tag, repo, err)
+ } else {
+ logrus.Infof("image create request process success, name:%v:%v repo:%v", image, tag, repo)
+ }
+ }()
+
w.Header().Set("Content-Type", "application/json")
version := httputils.VersionFromContext(ctx)
if versions.GreaterThanOrEqualTo(version, "1.32") {
apiPlatform := r.FormValue("platform")
if apiPlatform != "" {
- sp, err := platforms.Parse(apiPlatform)
+ sp, err = platforms.Parse(apiPlatform)
if err != nil {
return err
}
@@ -70,7 +82,7 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
authConfig := &types.AuthConfig{}
if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
- if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
+ if err = json.NewDecoder(authJSON).Decode(authConfig); err != nil {
// for a pull it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
authConfig = &types.AuthConfig{}
--
2.23.0

View File

@ -0,0 +1,72 @@
From 3fab78a174b23d012a71f96fd4cdc7590706323e Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Mon, 8 Nov 2021 20:23:08 +0800
Subject: [PATCH] docker: Adding logs for debugging in docker stop
do the following logs for debug
1. add container id in logs
2. add logs for each "kill"
3. sync with community
---
components/engine/daemon/container_operations_unix.go | 2 +-
components/engine/daemon/stop.go | 10 ++++++----
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/components/engine/daemon/container_operations_unix.go b/components/engine/daemon/container_operations_unix.go
index df2f3261f..2ea167ca2 100644
--- a/components/engine/daemon/container_operations_unix.go
+++ b/components/engine/daemon/container_operations_unix.go
@@ -345,7 +345,6 @@ func killProcessDirectly(cntr *container.Container) error {
if status.Err() != nil {
// Ensure that we don't kill ourselves
if pid := cntr.GetPID(); pid != 0 {
- logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(cntr.ID))
pattern := fmt.Sprintf("/var/run/docker/containerd/exit/moby/%s.%d.*", cntr.ID, pid)
efiles, err := filepath.Glob(pattern)
if err != nil {
@@ -356,6 +355,7 @@ func killProcessDirectly(cntr *container.Container) error {
return errNoSuchProcess{pid, 9}
}
+ logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(cntr.ID))
if err := unix.Kill(pid, 9); err != nil {
if err != unix.ESRCH {
return err
diff --git a/components/engine/daemon/stop.go b/components/engine/daemon/stop.go
index 741f5d5dd..633a34aab 100644
--- a/components/engine/daemon/stop.go
+++ b/components/engine/daemon/stop.go
@@ -48,7 +48,7 @@ func (daemon *Daemon) containerStop(container *containerpkg.Container, seconds i
stopSignal := container.StopSignal()
// 1. Send a stop signal
if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil {
- logrus.Infof("docker send %d signal to stop container get error: %v", stopSignal, err)
+ logrus.Infof("docker send %d signal to stop container %v get error: %v", stopSignal, container.ID, err)
// While normally we might "return err" here we're not going to
// because if we can't stop the container by this point then
// it's probably because it's already stopped. Meaning, between
@@ -63,7 +63,7 @@ func (daemon *Daemon) containerStop(container *containerpkg.Container, seconds i
defer cancel()
if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil {
- logrus.Infof("Container failed to stop after sending signal %d to the process, force killing", stopSignal)
+ logrus.Infof("Container %v failed to stop after sending signal %d to the process, force killing", container.ID, stopSignal)
if err := daemon.killPossiblyDeadProcess(container, 9); err != nil {
return err
}
@@ -85,8 +85,10 @@ func (daemon *Daemon) containerStop(container *containerpkg.Container, seconds i
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
- <-container.Wait(ctx, containerpkg.WaitConditionNotRunning)
- logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it
+ if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil {
+ logrus.WithError(err).WithField("container", container.ID).Error("Error killing the container")
+ return err
+ }
}
}
--
2.27.0

View File

@ -0,0 +1,45 @@
From b86b55f6bdad46b2fcb955402c512305eb36e90c Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Mon, 15 Nov 2021 15:40:55 +0800
Subject: [PATCH] docker: add log for easy debug in exit event handler
---
components/engine/daemon/monitor.go | 2 +-
components/engine/libcontainerd/client_daemon.go | 7 +++++++
2 files changed, 8 insertions(+), 1 deletion(-)
diff --git a/components/engine/daemon/monitor.go b/components/engine/daemon/monitor.go
index 1b577c0da..0aadf33fd 100644
--- a/components/engine/daemon/monitor.go
+++ b/components/engine/daemon/monitor.go
@@ -58,8 +58,8 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerd.EventType, ei libc
daemon.LogContainerEvent(c, "oom")
case libcontainerd.EventExit:
if int(ei.Pid) == c.Pid {
+ logrus.Infof("handle container %s exit event pid=%d", c.ID, c.Pid)
c.Lock()
- logrus.Infof("handle exit event cid=%s pid=%d", c.ID, c.Pid)
_, _, err := daemon.containerd.DeleteTask(context.Background(), c.ID)
if err != nil {
logrus.WithError(err).Warnf("failed to delete container %s from containerd", c.ID)
diff --git a/components/engine/libcontainerd/client_daemon.go b/components/engine/libcontainerd/client_daemon.go
index 9c65e54c3..62e0f58d5 100755
--- a/components/engine/libcontainerd/client_daemon.go
+++ b/components/engine/libcontainerd/client_daemon.go
@@ -726,6 +726,13 @@ func (c *client) processEvent(ctr *container, et EventType, ei EventInfo) {
}).Error("failed to process event")
}
+ defer func() {
+ if et == EventExit {
+ c.logger.Infof("handled exit event processID=%s containerID=%s pid=%d", ei.ProcessID, ei.ContainerID, ei.Pid)
+ }
+ }()
+
+
if et == EventExit && ei.ProcessID != ei.ContainerID {
p := ctr.getProcess(ei.ProcessID)
if p == nil {
--
2.27.0

View File

@ -0,0 +1,26 @@
From 0f1c3dc7a112d26b45001bf0631e6ae43f7c2f39 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Sun, 21 Nov 2021 14:09:37 +0800
Subject: [PATCH] docker: change log level when containerd return "container
not found" err
---
components/engine/daemon/kill.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/components/engine/daemon/kill.go b/components/engine/daemon/kill.go
index 593275cf8..3f0972a72 100644
--- a/components/engine/daemon/kill.go
+++ b/components/engine/daemon/kill.go
@@ -105,7 +105,7 @@ func (daemon *Daemon) killWithSignal(container *containerpkg.Container, sig int)
if err := daemon.kill(container, sig); err != nil {
if errdefs.IsNotFound(err) {
unpause = false
- logrus.WithError(err).WithField("container", container.ID).WithField("action", "kill").Debug("container kill failed because of 'container not found' or 'no such process'")
+ logrus.WithError(err).WithField("container", container.ID).WithField("action", "kill").Info("container kill failed because of 'container not found' or 'no such process'")
} else {
return errors.Wrapf(err, "Cannot kill container %s", container.ID)
}
--
2.27.0

View File

@ -0,0 +1,82 @@
From d82a0c7617c5b05871c2cd19812e5bbe539dc1b5 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Thu, 9 Dec 2021 11:55:02 +0800
Subject: [PATCH] docker: Fix container exited after docker restart when
processEvent hang
when processEvent hang, container state will not be Exited in time, and
the containerStop in containerRestart will return nill due to "no such
container", and the containerStart in containerRestart will not execute
for the container state is Running.
---
components/engine/container/container.go | 8 ++++++++
components/engine/daemon/container_operations_unix.go | 2 +-
components/engine/daemon/kill.go | 10 ++++++----
3 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/components/engine/container/container.go b/components/engine/container/container.go
index 7cdf07535..87cdaba2c 100644
--- a/components/engine/container/container.go
+++ b/components/engine/container/container.go
@@ -539,6 +539,14 @@ func (container *Container) StopTimeout() int {
return DefaultStopTimeout
}
+func (container *Container) WaitForState(waitCondition WaitCondition, timeout int) error {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
+ defer cancel()
+
+ status := <-container.Wait(ctx, waitCondition)
+ return status.Err()
+}
+
// InitDNSHostConfig ensures that the dns fields are never nil.
// New containers don't ever have those fields nil,
// but pre created containers can still have those nil values.
diff --git a/components/engine/daemon/container_operations_unix.go b/components/engine/daemon/container_operations_unix.go
index 2ea167ca2..e1456ce86 100644
--- a/components/engine/daemon/container_operations_unix.go
+++ b/components/engine/daemon/container_operations_unix.go
@@ -361,7 +361,7 @@ func killProcessDirectly(cntr *container.Container) error {
return err
}
e := errNoSuchProcess{pid, 9}
- logrus.Debug(e)
+ logrus.WithError(e).WithField("container", cntr.ID).Warning("no such process")
return e
}
}
diff --git a/components/engine/daemon/kill.go b/components/engine/daemon/kill.go
index 3f0972a72..2652f7ad2 100644
--- a/components/engine/daemon/kill.go
+++ b/components/engine/daemon/kill.go
@@ -147,13 +147,12 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
// by that time the container is still running, then the error
// we got is probably valid and so we return it to the caller.
if isErrNoSuchProcess(err) {
+ // wait the container's stop timeout amount of time to see if the event is eventually processed
+ container.WaitForState(containerpkg.WaitConditionNotRunning, container.StopTimeout())
return nil
}
- ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
- defer cancel()
-
- if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() == nil {
+ if waitError := container.WaitForState(containerpkg.WaitConditionNotRunning, 2); waitError == nil {
return nil
}
}
@@ -161,6 +160,9 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
// 2. Wait for the process to die, in last resort, try to kill the process directly
if err := killProcessDirectly(container); err != nil {
if isErrNoSuchProcess(err) {
+ // there is a case where we hit here before the exit event is processed
+ // So let's wait the container's stop timeout amount of time to see if the event is eventually processed
+ container.WaitForState(containerpkg.WaitConditionNotRunning, container.StopTimeout())
return nil
}
return err
--
2.27.0

View File

@ -0,0 +1,34 @@
From ba62de1350b25ec1d85eff67bd3c8c5be98d02a7 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Thu, 17 Mar 2022 20:18:30 +0800
Subject: [PATCH] docker: fix "endpoint with name container_xx already exists
in network none" error
---
components/engine/daemon/kill.go | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/components/engine/daemon/kill.go b/components/engine/daemon/kill.go
index 2652f7ad2..cb0ec61d1 100644
--- a/components/engine/daemon/kill.go
+++ b/components/engine/daemon/kill.go
@@ -162,7 +162,16 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
if isErrNoSuchProcess(err) {
// there is a case where we hit here before the exit event is processed
// So let's wait the container's stop timeout amount of time to see if the event is eventually processed
- container.WaitForState(containerpkg.WaitConditionNotRunning, container.StopTimeout())
+ if err := container.WaitForState(containerpkg.WaitConditionNotRunning, container.StopTimeout()); err != nil {
+ ei := libcontainerd.EventInfo{
+ ContainerID: container.ID,
+ ProcessID: container.ID,
+ Pid: uint32(container.GetPID()),
+ ExitCode: 137,
+ ExitedAt: time.Now(),
+ }
+ daemon.ProcessEvent(container.ID, libcontainerd.EventExit, ei)
+ }
return nil
}
return err
--
2.23.0

View File

@ -0,0 +1,25 @@
From f250af43f458e27e37f2ed2690b320d5bbf80173 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Mon, 13 Dec 2021 17:20:13 +0800
Subject: [PATCH] docker: fix "Up 292 years" in status in docker ps -a
---
components/engine/container/state.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/components/engine/container/state.go b/components/engine/container/state.go
index e9666ed92..da19cc49e 100644
--- a/components/engine/container/state.go
+++ b/components/engine/container/state.go
@@ -283,7 +283,7 @@ func (s *State) SetRunning(pid int, initial bool) {
}
s.ExitCodeValue = 0
s.Pid = pid
- if initial {
+ if initial || s.StartedAt.IsZero() {
s.StartedAt = time.Now().UTC()
}
}
--
2.27.0

View File

@ -0,0 +1,111 @@
From 3d3d7570714a8ab60b979eaba39309b6e8fcf75e Mon Sep 17 00:00:00 2001
From: Michael Crosby <crosbymichael@gmail.com>
Date: Wed, 13 Mar 2019 16:04:28 -0400
Subject: [PATCH] Use original process spec for execs
Fixes #38865
Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
(cherry picked from commit 7603c22c7365d7d7150597fe396e0707d6e561da)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Conflict:NA
Reference:https://github.com/docker/engine/pull/178/commits/3d3d7570714a8ab60b979eaba39309b6e8fcf75e
---
components/engine/daemon/exec.go | 24 ++++++++++++++++++------
components/engine/integration/container/exec_test.go | 15 +++++++++++++++
components/engine/integration/internal/container/ops.go | 7 +++++++
3 files changed, 40 insertions(+), 6 deletions(-)
diff --git a/components/engine/daemon/exec.go b/components/engine/daemon/exec.go
index f0b43d7253..abb239b520 100644
--- a/components/engine/daemon/exec.go
+++ b/components/engine/daemon/exec.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"io"
+ "runtime"
"strings"
"time"
@@ -16,7 +17,7 @@ import (
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/term"
- specs "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -217,12 +218,23 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
ec.StreamConfig.NewNopInputPipe()
}
- p := &specs.Process{
- Args: append([]string{ec.Entrypoint}, ec.Args...),
- Env: ec.Env,
- Terminal: ec.Tty,
- Cwd: ec.WorkingDir,
+ p := &specs.Process{}
+ if runtime.GOOS != "windows" {
+ container, err := d.containerdCli.LoadContainer(ctx, ec.ContainerID)
+ if err != nil {
+ return err
+ }
+ spec, err := container.Spec(ctx)
+ if err != nil {
+ return err
+ }
+ p = spec.Process
}
+ p.Args = append([]string{ec.Entrypoint}, ec.Args...)
+ p.Env = ec.Env
+ p.Cwd = ec.WorkingDir
+ p.Terminal = ec.Tty
+
if p.Cwd == "" {
p.Cwd = "/"
}
diff --git a/components/engine/integration/container/exec_test.go b/components/engine/integration/container/exec_test.go
index 20b1f3e8b5..0c3e01af41 100644
--- a/components/engine/integration/container/exec_test.go
+++ b/components/engine/integration/container/exec_test.go
@@ -118,3 +118,18 @@ func TestExec(t *testing.T) {
assert.Assert(t, is.Contains(out, "PWD=/tmp"), "exec command not running in expected /tmp working directory")
assert.Assert(t, is.Contains(out, "FOO=BAR"), "exec command not running with expected environment variable FOO")
}
+
+func TestExecUser(t *testing.T) {
+ skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.39"), "broken in earlier versions")
+ skip.If(t, testEnv.OSType == "windows", "FIXME. Probably needs to wait for container to be in running state.")
+ defer setupTest(t)()
+ ctx := context.Background()
+ client := testEnv.APIClient()
+
+ cID := container.Run(t, ctx, client, container.WithTty(true), container.WithUser("1:1"))
+
+ result, err := container.Exec(ctx, client, cID, []string{"id"})
+ assert.NilError(t, err)
+
+ assert.Assert(t, is.Contains(result.Stdout(), "uid=1(daemon) gid=1(daemon)"), "exec command not running as uid/gid 1")
+}
diff --git a/components/engine/integration/internal/container/ops.go b/components/engine/integration/internal/container/ops.go
index df5598b62f..b2d170b4df 100644
--- a/components/engine/integration/internal/container/ops.go
+++ b/components/engine/integration/internal/container/ops.go
@@ -134,3 +134,10 @@ func WithAutoRemove(c *TestContainerConfig) {
}
c.HostConfig.AutoRemove = true
}
+
+// WithUser sets the user
+func WithUser(user string) func(c *TestContainerConfig) {
+ return func(c *TestContainerConfig) {
+ c.Config.User = user
+ }
+}
--
2.27.0

View File

@ -0,0 +1,81 @@
From d3bf68367fe708a1d74d89a8d57c9b85c4fd292d Mon Sep 17 00:00:00 2001
From: build <build@obs.com>
Date: Thu, 16 Jun 2022 09:53:40 +0800
Subject: [PATCH] CVE-2022-24769
Signed-off-by: build <build@obs.com>
---
components/engine/daemon/exec_linux.go | 10 ++++------
components/engine/daemon/oci.go | 20 ++++++++++++--------
components/engine/oci/defaults.go | 1 -
3 files changed, 16 insertions(+), 15 deletions(-)
diff --git a/components/engine/daemon/exec_linux.go b/components/engine/daemon/exec_linux.go
index cd52f48..8720aa9 100644
--- a/components/engine/daemon/exec_linux.go
+++ b/components/engine/daemon/exec_linux.go
@@ -21,13 +21,11 @@ func (daemon *Daemon) execSetPlatformOpt(c *container.Container, ec *exec.Config
}
}
if ec.Privileged {
- if p.Capabilities == nil {
- p.Capabilities = &specs.LinuxCapabilities{}
+ p.Capabilities = &specs.LinuxCapabilities{
+ Bounding: caps.GetAllCapabilities(),
+ Permitted: caps.GetAllCapabilities(),
+ Effective: caps.GetAllCapabilities(),
}
- p.Capabilities.Bounding = caps.GetAllCapabilities()
- p.Capabilities.Permitted = p.Capabilities.Bounding
- p.Capabilities.Inheritable = p.Capabilities.Bounding
- p.Capabilities.Effective = p.Capabilities.Bounding
}
if apparmor.IsEnabled() {
var appArmorProfile string
diff --git a/components/engine/daemon/oci.go b/components/engine/daemon/oci.go
index 52050e2..4148e90 100644
--- a/components/engine/daemon/oci.go
+++ b/components/engine/daemon/oci.go
@@ -26,15 +26,19 @@ func setCapabilities(s *specs.Spec, c *container.Container) error {
return err
}
}
- s.Process.Capabilities.Effective = caplist
- s.Process.Capabilities.Bounding = caplist
- s.Process.Capabilities.Permitted = caplist
- s.Process.Capabilities.Inheritable = caplist
// setUser has already been executed here
- // if non root drop capabilities in the way execve does
- if s.Process.User.UID != 0 {
- s.Process.Capabilities.Effective = []string{}
- s.Process.Capabilities.Permitted = []string{}
+ if s.Process.User.UID == 0 {
+ s.Process.Capabilities = &specs.LinuxCapabilities{
+ Effective: caplist,
+ Bounding: caplist,
+ Permitted: caplist,
+ }
+ } else {
+ // Do not set Effective and Permitted capabilities for non-root users,
+ // to match what execve does.
+ s.Process.Capabilities = &specs.LinuxCapabilities{
+ Bounding: caplist,
+ }
}
return nil
}
diff --git a/components/engine/oci/defaults.go b/components/engine/oci/defaults.go
index ff027d8..57cbddb 100644
--- a/components/engine/oci/defaults.go
+++ b/components/engine/oci/defaults.go
@@ -61,7 +61,6 @@ func DefaultLinuxSpec() specs.Spec {
Capabilities: &specs.LinuxCapabilities{
Bounding: defaultCapabilities(),
Permitted: defaultCapabilities(),
- Inheritable: defaultCapabilities(),
Effective: defaultCapabilities(),
},
},
--
2.33.0

View File

@ -0,0 +1,36 @@
From e37f4e4f738b605fe5ea1030e39da8d723260007 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Fri, 18 Mar 2022 11:19:28 +0800
Subject: [PATCH] docker: fix rwlayer unmounted after container restart
if the exit event is handled too slowly, then the exit event may be handled again.
we need to add a check after the container lock is acquired.
---
components/engine/daemon/monitor.go | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/components/engine/daemon/monitor.go b/components/engine/daemon/monitor.go
index 0aadf33fd..0bf7f0379 100644
--- a/components/engine/daemon/monitor.go
+++ b/components/engine/daemon/monitor.go
@@ -60,6 +60,17 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerd.EventType, ei libc
if int(ei.Pid) == c.Pid {
logrus.Infof("handle container %s exit event pid=%d", c.ID, c.Pid)
c.Lock()
+
+ // ProcessEvent could be called concurrently, and will execute serially
+ // due to c.Lock(), but the int(ei.Pid) == c.Pid check has already passed. That would cause
+ // daemon.Cleanup to be called twice. This makes the rwlayer unmount during docker
+ // restart, getting a "fork/exec /proc/self/exe: no such file or directory" err.
+ // Adding this check under c.Lock() avoids daemon.Cleanup being called again.
+ if c.Pid == 0 || int(ei.Pid) != c.Pid {
+ c.Unlock()
+ return nil
+ }
+
_, _, err := daemon.containerd.DeleteTask(context.Background(), c.ID)
if err != nil {
logrus.WithError(err).Warnf("failed to delete container %s from containerd", c.ID)
--
2.23.0

View File

@ -0,0 +1,38 @@
From 548078b9e76e34c6994830ce35bee1c15e3c091f Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Mon, 21 Mar 2022 11:05:43 +0800
Subject: [PATCH] docker: close channel in write side to avoid panic in docker
stats
there is a situation where, when writing an event to chan c, chan c is closed,
and that will cause a panic. Closing chan c on the write side can avoid
the panic.
---
components/cli/cli/command/container/stats.go | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/components/cli/cli/command/container/stats.go b/components/cli/cli/command/container/stats.go
index 8387fc988..daab91627 100644
--- a/components/cli/cli/command/container/stats.go
+++ b/components/cli/cli/command/container/stats.go
@@ -60,6 +60,9 @@ func runStats(dockerCli command.Cli, opts *statsOptions) error {
// monitorContainerEvents watches for container creation and removal (only
// used when calling `docker stats` without arguments).
monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) {
+ // close channel in write side to avoid panic
+ defer close(c)
+
f := filters.NewArgs()
f.Add("type", "container")
options := types.EventsOptions{
@@ -150,7 +153,6 @@ func runStats(dockerCli command.Cli, opts *statsOptions) error {
eventChan := make(chan events.Message)
go eh.Watch(eventChan)
go monitorContainerEvents(started, eventChan)
- defer close(eventChan)
<-started
// Start a short-lived goroutine to retrieve the initial list of
--
2.23.0

View File

@ -0,0 +1,53 @@
From 80f1169eca587305759829e626cebd2a434664f6 Mon Sep 17 00:00:00 2001
From: Tonis Tiigi <tonistiigi@gmail.com>
Date: Wed, 19 May 2021 16:51:35 -0700
Subject: [PATCH] chrootarchive: don't create parent dirs outside of chroot
If chroot is used with a special root directory then create
destination directory within chroot. This works automatically
already due to extractor creating parent paths and is only
used currently with cp where parent paths are actually required
and error will be shown to user before reaching this point.
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
(cherry picked from commit 52d285184068998c22632bfb869f6294b5613a58)
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
Conflict:NA
Reference:https://github.com/moby/moby/commit/bce32e5c93be4caf1a592582155b9cb837fc129a
---
components/engine/pkg/chrootarchive/archive.go | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/components/engine/pkg/chrootarchive/archive.go b/components/engine/pkg/chrootarchive/archive.go
index 6ff61e6a7..9926b63b8 100644
--- a/components/engine/pkg/chrootarchive/archive.go
+++ b/components/engine/pkg/chrootarchive/archive.go
@@ -65,13 +65,17 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
options.ExcludePatterns = []string{}
}
- idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
- rootIDs := idMapping.RootPair()
+ // If dest is inside a root then directory is created within chroot by extractor.
+ // This case is only currently used by cp.
+ if dest == root {
+ idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+ rootIDs := idMapping.RootPair()
- dest = filepath.Clean(dest)
- if _, err := os.Stat(dest); os.IsNotExist(err) {
- if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
- return err
+ dest = filepath.Clean(dest)
+ if _, err := os.Stat(dest); os.IsNotExist(err) {
+ if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
+ return err
+ }
}
}
--
2.30.0

View File

@ -0,0 +1,323 @@
From 4d3147906307befb5055d668bb4d55c1f3c03286 Mon Sep 17 00:00:00 2001
From: zhongjiawei <zhongjiawei1@huawei.com>
Date: Thu, 9 Jun 2022 10:48:26 +0800
Subject: [PATCH] docker: Lock down docker root dir perms.
Do not use 0701 perms.
0701 dir perms allows anyone to traverse the docker dir.
It happens to allow any user to execute, as an example, suid binaries
from image rootfs dirs because it allows traversal AND critically
container users need to be able to do execute things.
0701 on lower directories also happens to allow any user to modify
things in, for instance, the overlay upper dir which necessarily
has 0755 permissions.
This changes to use 0710 which allows users in the group to traverse.
In userns mode the UID owner is (real) root and the GID is the remapped
root's GID.
This prevents anyone but the remapped root to traverse our directories
(which is required for userns with runc).
Conflict:daemon/graphdriver/fuse-overlayfs/fuseoverlayfs.go
Reference:https://github.com/moby/moby/commit/f0ab919f518c47240ea0e72d0999576bb8008e64
---
.../daemon/container_operations_unix.go | 2 +-
components/engine/daemon/create.go | 5 ++--
components/engine/daemon/daemon.go | 5 +++-
components/engine/daemon/daemon_unix.go | 13 +++++-----
.../engine/daemon/graphdriver/aufs/aufs.go | 13 ++++++++--
.../engine/daemon/graphdriver/btrfs/btrfs.go | 18 ++++++++++++--
.../daemon/graphdriver/overlay/overlay.go | 19 +++++++++++----
.../daemon/graphdriver/overlay2/overlay.go | 24 +++++++++++++++----
.../engine/daemon/graphdriver/vfs/driver.go | 16 +++++++++++--
.../engine/daemon/graphdriver/zfs/zfs.go | 11 ++++++++-
10 files changed, 101 insertions(+), 25 deletions(-)
diff --git a/components/engine/daemon/container_operations_unix.go b/components/engine/daemon/container_operations_unix.go
index e238366c1..5c6a09ce4 100644
--- a/components/engine/daemon/container_operations_unix.go
+++ b/components/engine/daemon/container_operations_unix.go
@@ -425,5 +425,5 @@ func (daemon *Daemon) setupContainerMountsRoot(c *container.Container) error {
if err != nil {
return err
}
- return idtools.MkdirAllAndChown(p, 0701, idtools.CurrentIdentity())
+ return idtools.MkdirAllAndChown(p, 0710, idtools.Identity{UID: idtools.CurrentIdentity().UID, GID: daemon.IdentityMapping().RootPair().GID})
}
diff --git a/components/engine/daemon/create.go b/components/engine/daemon/create.go
index 4d083e703..e3dd598d4 100644
--- a/components/engine/daemon/create.go
+++ b/components/engine/daemon/create.go
@@ -190,10 +190,11 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (
return nil, err
}
- if err := idtools.MkdirAndChown(container.Root, 0701, idtools.CurrentIdentity()); err != nil {
+ current := idtools.CurrentIdentity()
+ if err := idtools.MkdirAndChown(container.Root, 0710, idtools.Identity{UID: current.UID, GID: daemon.IdentityMapping().RootPair().GID}); err != nil {
return nil, err
}
- if err := idtools.MkdirAndChown(container.CheckpointDir(), 0700, idtools.CurrentIdentity()); err != nil {
+ if err := idtools.MkdirAndChown(container.CheckpointDir(), 0700, current); err != nil {
return nil, err
}
diff --git a/components/engine/daemon/daemon.go b/components/engine/daemon/daemon.go
index b3039abf3..5c6be8e45 100644
--- a/components/engine/daemon/daemon.go
+++ b/components/engine/daemon/daemon.go
@@ -913,7 +913,10 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
}
daemonRepo := filepath.Join(config.Root, "containers")
- if err := idtools.MkdirAllAndChown(daemonRepo, 0701, idtools.CurrentIdentity()); err != nil {
+ if err := idtools.MkdirAllAndChown(daemonRepo, 0710, idtools.Identity{
+ UID: idtools.CurrentIdentity().UID,
+ GID: rootIDs.GID,
+ }); err != nil {
return nil, err
}
diff --git a/components/engine/daemon/daemon_unix.go b/components/engine/daemon/daemon_unix.go
index 07a0aa0d5..8c21807df 100644
--- a/components/engine/daemon/daemon_unix.go
+++ b/components/engine/daemon/daemon_unix.go
@@ -1291,21 +1291,22 @@ func setupDaemonRoot(config *config.Config, rootDir string, remappedRoot idtools
}
}
+ id := idtools.Identity{UID: idtools.CurrentIdentity().UID, GID: remappedRoot.GID}
+ // First make sure the current root dir has the correct perms.
+ if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil {
+ return errors.Wrapf(err, "could not create or set daemon root permissions: %s", config.Root)
+ }
+
// if user namespaces are enabled we will create a subtree underneath the specified root
// with any/all specified remapped root uid/gid options on the daemon creating
// a new subdirectory with ownership set to the remapped uid/gid (so as to allow
// `chdir()` to work for containers namespaced to that uid/gid)
if config.RemappedRoot != "" {
- id := idtools.CurrentIdentity()
- // First make sure the current root dir has the correct perms.
- if err := idtools.MkdirAllAndChown(config.Root, 0701, id); err != nil {
- return errors.Wrapf(err, "could not create or set daemon root permissions: %s", config.Root)
- }
config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", remappedRoot.UID, remappedRoot.GID))
logrus.Debugf("Creating user namespaced daemon root: %s", config.Root)
// Create the root directory if it doesn't exist
- if err := idtools.MkdirAllAndChown(config.Root, 0701, id); err != nil {
+ if err := idtools.MkdirAllAndChown(config.Root, 0710, id); err != nil {
return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err)
}
// we also need to verify that any pre-existing directories in the path to
diff --git a/components/engine/daemon/graphdriver/aufs/aufs.go b/components/engine/daemon/graphdriver/aufs/aufs.go
index 4ee3682cb..f0e8e0b23 100644
--- a/components/engine/daemon/graphdriver/aufs/aufs.go
+++ b/components/engine/daemon/graphdriver/aufs/aufs.go
@@ -131,14 +131,23 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
}
currentID := idtools.CurrentIdentity()
+ _, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ if err != nil {
+ return nil, err
+ }
+ dirID := idtools.Identity{
+ UID: currentID.UID,
+ GID: rootGID,
+ }
+
// Create the root aufs driver dir
- if err := idtools.MkdirAllAndChown(root, 0701, currentID); err != nil {
+ if err := idtools.MkdirAllAndChown(root, 0710, dirID); err != nil {
return nil, err
}
// Populate the dir structure
for _, p := range paths {
- if err := idtools.MkdirAllAndChown(path.Join(root, p), 0701, currentID); err != nil {
+ if err := idtools.MkdirAllAndChown(path.Join(root, p), 0710, dirID); err != nil {
return nil, err
}
}
diff --git a/components/engine/daemon/graphdriver/btrfs/btrfs.go b/components/engine/daemon/graphdriver/btrfs/btrfs.go
index d76e14490..35e14db0f 100644
--- a/components/engine/daemon/graphdriver/btrfs/btrfs.go
+++ b/components/engine/daemon/graphdriver/btrfs/btrfs.go
@@ -70,7 +70,14 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
return nil, graphdriver.ErrPrerequisites
}
- if err := idtools.MkdirAllAndChown(home, 0701, idtools.CurrentIdentity()); err != nil {
+ remappedRoot := idtools.NewIDMappingsFromMaps(uidMaps, gidMaps)
+ currentID := idtools.CurrentIdentity()
+ dirID := idtools.Identity{
+ UID: currentID.UID,
+ GID: remappedRoot.RootPair().GID,
+ }
+
+ if err := idtools.MkdirAllAndChown(home, 0710, dirID); err != nil {
return nil, err
}
@@ -531,7 +538,14 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
if err != nil {
return err
}
- if err := idtools.MkdirAllAndChown(subvolumes, 0701, idtools.CurrentIdentity()); err != nil {
+
+ currentID := idtools.CurrentIdentity()
+ dirID := idtools.Identity{
+ UID: currentID.UID,
+ GID: rootGID,
+ }
+
+ if err := idtools.MkdirAllAndChown(subvolumes, 0710, dirID); err != nil {
return err
}
if parent == "" {
diff --git a/components/engine/daemon/graphdriver/overlay/overlay.go b/components/engine/daemon/graphdriver/overlay/overlay.go
index a9e65a35c..566c4cc9f 100644
--- a/components/engine/daemon/graphdriver/overlay/overlay.go
+++ b/components/engine/daemon/graphdriver/overlay/overlay.go
@@ -163,8 +163,18 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
logrus.WithField("storage-driver", "overlay").Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs))
}
+ currentID := idtools.CurrentIdentity()
+ _, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ if err != nil {
+ return nil, err
+ }
+ dirID := idtools.Identity{
+ UID: currentID.UID,
+ GID: rootGID,
+ }
+
// Create the driver home dir
- if err := idtools.MkdirAllAndChown(home, 0701, idtools.CurrentIdentity()); err != nil {
+ if err := idtools.MkdirAllAndChown(home, 0710, dirID); err != nil {
return nil, err
}
@@ -300,10 +310,11 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
root := idtools.Identity{UID: rootUID, GID: rootGID}
currentID := idtools.CurrentIdentity()
- if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, currentID); err != nil {
- return err
+ dirID := idtools.Identity{
+ UID: currentID.UID,
+ GID: rootGID,
}
- if err := idtools.MkdirAndChown(dir, 0701, currentID); err != nil {
+ if err := idtools.MkdirAndChown(dir, 0710, dirID); err != nil {
return err
}
diff --git a/components/engine/daemon/graphdriver/overlay2/overlay.go b/components/engine/daemon/graphdriver/overlay2/overlay.go
index 7576320ad..3a9f5ce6e 100644
--- a/components/engine/daemon/graphdriver/overlay2/overlay.go
+++ b/components/engine/daemon/graphdriver/overlay2/overlay.go
@@ -197,7 +197,20 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs))
}
- if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil {
+ _, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ if err != nil {
+ return nil, err
+ }
+
+ cur := idtools.CurrentIdentity()
+ dirID := idtools.Identity{
+ UID: cur.UID,
+ GID: rootGID,
+ }
+ if err := idtools.MkdirAllAndChown(home, 0710, dirID); err != nil {
+ return nil, err
+ }
+ if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0700, cur); err != nil {
return nil, err
}
@@ -424,12 +437,15 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
return err
}
root := idtools.Identity{UID: rootUID, GID: rootGID}
- current := idtools.CurrentIdentity()
+ dirID := idtools.Identity{
+ UID: idtools.CurrentIdentity().UID,
+ GID: rootGID,
+ }
- if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil {
+ if err := idtools.MkdirAllAndChown(path.Dir(dir), 0710, dirID); err != nil {
return err
}
- if err := idtools.MkdirAndChown(dir, 0701, current); err != nil {
+ if err := idtools.MkdirAndChown(dir, 0710, dirID); err != nil {
return err
}
diff --git a/components/engine/daemon/graphdriver/vfs/driver.go b/components/engine/daemon/graphdriver/vfs/driver.go
index 15ac25199..3ced5d7a1 100644
--- a/components/engine/daemon/graphdriver/vfs/driver.go
+++ b/components/engine/daemon/graphdriver/vfs/driver.go
@@ -30,7 +30,15 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
home: home,
idMapping: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps),
}
- if err := idtools.MkdirAllAndChown(home, 0701, idtools.CurrentIdentity()); err != nil {
+ _, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ if err != nil {
+ return nil, err
+ }
+ dirID := idtools.Identity{
+ UID: idtools.CurrentIdentity().UID,
+ GID: rootGID,
+ }
+ if err := idtools.MkdirAllAndChown(home, 0710, dirID); err != nil {
return nil, err
}
@@ -115,7 +123,11 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
func (d *Driver) create(id, parent string, size uint64) error {
dir := d.dir(id)
rootIDs := d.idMapping.RootPair()
- if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0701, idtools.CurrentIdentity()); err != nil {
+ dirID := idtools.Identity{
+ UID: idtools.CurrentIdentity().UID,
+ GID: rootIDs.GID,
+ }
+ if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0710, dirID); err != nil {
return err
}
if err := idtools.MkdirAndChown(dir, 0755, rootIDs); err != nil {
diff --git a/components/engine/daemon/graphdriver/zfs/zfs.go b/components/engine/daemon/graphdriver/zfs/zfs.go
index 4484c517a..944f902f6 100644
--- a/components/engine/daemon/graphdriver/zfs/zfs.go
+++ b/components/engine/daemon/graphdriver/zfs/zfs.go
@@ -102,7 +102,16 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
}
- if err := idtools.MkdirAllAndChown(base, 0701, idtools.CurrentIdentity()); err != nil {
+ _, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+ if err != nil {
+ return nil, err
+ }
+
+ dirID := idtools.Identity{
+ UID: idtools.CurrentIdentity().UID,
+ GID: rootGID,
+ }
+ if err := idtools.MkdirAllAndChown(base, 0710, dirID); err != nil {
return nil, fmt.Errorf("Failed to create '%s': %v", base, err)
}
--
2.30.0

View File

@ -0,0 +1,130 @@
From 47b9fb37236351afc0c2e58c109a70c1432096ff Mon Sep 17 00:00:00 2001
From: zhongjiawei <zhongjiawei1@huawei.com>
Date: Thu, 9 Jun 2022 10:50:43 +0800
Subject: [PATCH] docker: registry: ensure default auth config has address
Conflict:cli/command/registry.go,cli/command/registry/login.go
Reference:https://github.com/docker/cli/commit/893e52cf4ba4b048d72e99748e0f86b2767c6c6b
---
components/cli/cli/command/registry.go | 12 ++++++++----
components/cli/cli/command/registry/login.go | 13 ++++++-------
components/cli/cli/command/registry_test.go | 16 +++++++++++++++-
3 files changed, 29 insertions(+), 12 deletions(-)
diff --git a/components/cli/cli/command/registry.go b/components/cli/cli/command/registry.go
index c12843693..74abbfc5f 100644
--- a/components/cli/cli/command/registry.go
+++ b/components/cli/cli/command/registry.go
@@ -58,11 +58,11 @@ func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInf
if err != nil {
fmt.Fprintf(cli.Err(), "Unable to retrieve stored credentials for %s, error: %s.\n", indexServer, err)
}
- err = ConfigureAuth(cli, "", "", authConfig, isDefaultRegistry)
+ err = ConfigureAuth(cli, "", "", &authConfig, isDefaultRegistry)
if err != nil {
return "", err
}
- return EncodeAuthToBase64(*authConfig)
+ return EncodeAuthToBase64(authConfig)
}
}
@@ -81,7 +81,7 @@ func ResolveAuthConfig(ctx context.Context, cli Cli, index *registrytypes.IndexI
// GetDefaultAuthConfig gets the default auth config given a serverAddress
// If credentials for given serverAddress exists in the credential store, the configuration will be populated with values in it
-func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, isDefaultRegistry bool) (*types.AuthConfig, error) {
+func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) {
if !isDefaultRegistry {
serverAddress = registry.ConvertToHostname(serverAddress)
}
@@ -89,12 +89,16 @@ func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, is
var err error
if checkCredStore {
authconfig, err = cli.ConfigFile().GetAuthConfig(serverAddress)
+ if err != nil {
+ return types.AuthConfig{ServerAddress: serverAddress,}, err
+ }
} else {
authconfig = types.AuthConfig{}
}
authconfig.ServerAddress = serverAddress
authconfig.IdentityToken = ""
- return &authconfig, err
+ res := types.AuthConfig(authconfig)
+ return res, err
}
// ConfigureAuth handles prompting of user's username and password if needed
diff --git a/components/cli/cli/command/registry/login.go b/components/cli/cli/command/registry/login.go
index f4f57398b..f86076c5e 100644
--- a/components/cli/cli/command/registry/login.go
+++ b/components/cli/cli/command/registry/login.go
@@ -111,23 +111,22 @@ func runLogin(dockerCli command.Cli, opts loginOptions) error { //nolint: gocycl
}
var err error
- var authConfig *types.AuthConfig
var response registrytypes.AuthenticateOKBody
isDefaultRegistry := serverAddress == authServer
- authConfig, err = command.GetDefaultAuthConfig(dockerCli, opts.user == "" && opts.password == "", serverAddress, isDefaultRegistry)
+ authConfig, err := command.GetDefaultAuthConfig(dockerCli, opts.user == "" && opts.password == "", serverAddress, isDefaultRegistry)
if err == nil && authConfig.Username != "" && authConfig.Password != "" {
- response, err = loginWithCredStoreCreds(ctx, dockerCli, authConfig)
+ response, err = loginWithCredStoreCreds(ctx, dockerCli, &authConfig)
}
if err != nil || authConfig.Username == "" || authConfig.Password == "" {
- err = command.ConfigureAuth(dockerCli, opts.user, opts.password, authConfig, isDefaultRegistry)
+ err = command.ConfigureAuth(dockerCli, opts.user, opts.password, &authConfig, isDefaultRegistry)
if err != nil {
return err
}
- response, err = clnt.RegistryLogin(ctx, *authConfig)
+ response, err = clnt.RegistryLogin(ctx, authConfig)
if err != nil && client.IsErrConnectionFailed(err) {
// If the server isn't responding (yet) attempt to login purely client side
- response, err = loginClientSide(ctx, *authConfig)
+ response, err = loginClientSide(ctx, authConfig)
}
// If we (still) have an error, give up
if err != nil {
@@ -149,7 +148,7 @@ func runLogin(dockerCli command.Cli, opts loginOptions) error { //nolint: gocycl
}
}
- if err := creds.Store(*authConfig); err != nil {
+ if err := creds.Store(types.AuthConfig(authConfig)); err != nil {
return errors.Errorf("Error saving credentials: %v", err)
}
diff --git a/components/cli/cli/command/registry_test.go b/components/cli/cli/command/registry_test.go
index 966db86b9..a4a7fe184 100644
--- a/components/cli/cli/command/registry_test.go
+++ b/components/cli/cli/command/registry_test.go
@@ -144,7 +144,21 @@ func TestGetDefaultAuthConfig(t *testing.T) {
assert.Check(t, is.Equal(tc.expectedErr, err.Error()))
} else {
assert.NilError(t, err)
- assert.Check(t, is.DeepEqual(tc.expectedAuthConfig, *authconfig))
+ assert.Check(t, is.DeepEqual(tc.expectedAuthConfig, authconfig))
}
}
}
+
+func TestGetDefaultAuthConfig_HelperError(t *testing.T) {
+ cli := test.NewFakeCli(&fakeClient{})
+ errBuf := new(bytes.Buffer)
+ cli.SetErr(errBuf)
+ cli.ConfigFile().CredentialsStore = "fake-does-not-exist"
+ serverAddress := "test-server-address"
+ expectedAuthConfig := types.AuthConfig{
+ ServerAddress: serverAddress,
+ }
+ authconfig, err := GetDefaultAuthConfig(cli, true, serverAddress, serverAddress == "https://index.docker.io/v1/")
+ assert.Check(t, is.DeepEqual(expectedAuthConfig, authconfig))
+ assert.Check(t, is.ErrorContains(err, "docker-credential-fake-does-not-exist"))
+}
--
2.30.0

View File

@ -0,0 +1,62 @@
From 886c1473eddbb1a56f7bae116ad155ccb7c7cfb0 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Wed, 10 Aug 2022 16:05:06 +0800
Subject: [PATCH] docker: fix terminal abnormal after docker run
When running `docker run -it xxx bash` and then exiting, the terminal
becomes abnormal (no input, no output).
The reason is that in Go 1.17, package reflect's Value methods named
Pointer and UnsafeAddr return type uintptr instead of unsafe.Pointer,
to keep callers from changing the result to an arbitrary type without
first importing "unsafe". However, this means that the result is fragile
and must be converted to Pointer immediately after making the call,
in the same expression:
p := (*int)(unsafe.Pointer(reflect.ValueOf(new(int)).Pointer()))
As in the cases above, it is invalid to store the result before the conversion:
// INVALID: uintptr cannot be stored in variable
// before conversion back to Pointer.
u := reflect.ValueOf(new(int)).Pointer()
p := (*int)(unsafe.Pointer(u))
---
.../vendor/golang.org/x/sys/unix/syscall_linux.go | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/components/cli/vendor/golang.org/x/sys/unix/syscall_linux.go b/components/cli/vendor/golang.org/x/sys/unix/syscall_linux.go
index 690c2c87f..ca415b73f 100644
--- a/components/cli/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/components/cli/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -73,19 +73,28 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error {
// from fd, using the specified request number.
func IoctlGetInt(fd int, req uint) (int, error) {
var value int
- err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ var err error
+ if _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(unsafe.Pointer(&value))); e1 != 0 {
+ err = errnoErr(e1)
+ }
return value, err
}
func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
var value Winsize
- err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ var err error
+ if _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(unsafe.Pointer(&value))); e1 != 0 {
+ err = errnoErr(e1)
+ }
return &value, err
}
func IoctlGetTermios(fd int, req uint) (*Termios, error) {
var value Termios
- err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+ var err error
+ if _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(unsafe.Pointer(&value))); e1 != 0 {
+ err = errnoErr(e1)
+ }
return &value, err
}
--
2.23.0

View File

@ -0,0 +1,48 @@
From ebe1a56fb28e7de7128167973a99061e6aa0222a Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Thu, 7 Jul 2022 10:18:03 +0800
Subject: [PATCH] docker: Add an ExitPid field for State struct to record exit
process id
---
components/engine/container/state.go | 1 +
components/engine/daemon/monitor.go | 4 ++++
2 files changed, 5 insertions(+)
diff --git a/components/engine/container/state.go b/components/engine/container/state.go
index da19cc49e..292b0ec0b 100644
--- a/components/engine/container/state.go
+++ b/components/engine/container/state.go
@@ -27,6 +27,7 @@ type State struct {
RemovalInProgress bool // Not need for this to be persistent on disk.
Dead bool
Pid int
+ ExitPid uint32 // record exit process id.
ExitCodeValue int `json:"ExitCode"`
ErrorMsg string `json:"Error"` // contains last known error during container start, stop, or remove
StartedAt time.Time
diff --git a/components/engine/daemon/monitor.go b/components/engine/daemon/monitor.go
index a5c7ff5c8..89c05f3da 100644
--- a/components/engine/daemon/monitor.go
+++ b/components/engine/daemon/monitor.go
@@ -57,6 +57,7 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerd.EventType, ei libc
daemon.LogContainerEvent(c, "oom")
case libcontainerd.EventExit:
+ c.ExitPid = ei.Pid
if int(ei.Pid) == c.Pid {
logrus.Infof("handle container %s exit event pid=%d", c.ID, c.Pid)
c.Lock()
@@ -169,6 +170,9 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerd.EventType, ei libc
// This is here to handle start not generated by docker
if !c.Running {
+ if c.ExitPid == ei.Pid && time.Now().UTC().Sub(c.FinishedAt).Seconds() < 3 {
+ return nil
+ }
c.SetRunning(int(ei.Pid), false)
c.HasBeenManuallyStopped = false
c.HasBeenStartedBefore = true
--
2.23.0

View File

@ -0,0 +1,32 @@
From 7f4258e2b8b9b769beab23246d27984ada539ac9 Mon Sep 17 00:00:00 2001
From: chenjiankun <chenjiankun1@huawei.com>
Date: Thu, 15 Sep 2022 10:33:12 +0800
Subject: [PATCH] docker: AdditionalGids must include effective group ID
otherwise this one won't be considered for permission checks
Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
Conflict:daemon/oci_linux.go
Reference:https://github.com/moby/moby/commit/de7af816e76a7fd3fbf06bffa6832959289fba32
---
components/engine/daemon/oci_linux.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/components/engine/daemon/oci_linux.go b/components/engine/daemon/oci_linux.go
index 6d3bc165..2b803955 100644
--- a/components/engine/daemon/oci_linux.go
+++ b/components/engine/daemon/oci_linux.go
@@ -201,7 +201,7 @@ func getUser(c *container.Container, username string) (uint32, uint32, []uint32,
uid := uint32(execUser.Uid)
gid := uint32(execUser.Gid)
sgids := append(execUser.Sgids, addGroups...)
- var additionalGids []uint32
+ additionalGids := []uint32{gid}
for _, g := range sgids {
additionalGids = append(additionalGids, uint32(g))
}
--
2.23.0

View File

@ -188,7 +188,45 @@ patch/0186-docker-fix-execCommands-leak-in-health-check.patch
patch/0188-docker-check-containerd-pid-before-kill-it.patch
patch/0189-docker-fix-Access-to-remapped-root-allows-privilege-.patch
patch/0190-docker-fix-CVE-2021-21285.patch
patch/0191-rollback-if-docker-restart-when-doing-BlkDiscard.patch
patch/0192-docker-add-clone3-to-seccomp-whitelist-to-fix-curl-f.patch
patch/0193-docker-update-seccomp-whitelist-to-Linux-5.10-syscal.patch
patch/0191-docker-add-clone3-to-seccomp-whitelist-to-fix-curl-f.patch
patch/0192-docker-update-seccomp-whitelist-to-Linux-5.10-syscal.patch
patch/0193-docker-fix-images-filter-when-use-multi-reference.patch
patch/0194-docker-fix-docker-rmi-stucking.patch
patch/0195-docker-fix-network-sandbox-not-cleaned-up-on-failure.patch
patch/0196-docker-fix-container-status-not-consistent-with-its-.patch
patch/0197-docker-fix-hijack-hang.patch
patch/0198-docker-fix-docker-kill-command-block.patch
patch/0199-docker-pkg-archive-fix-TestTarUntarWithXattr-failure-on-rec.patch
patch/0200-docker-fix-unit-testcase-error.patch
patch/0201-docker-use-info-level-for-create-start-stop-command.patch
patch/0202-docker-rollback-if-docker-restart-when-doing-BlkDiscard.patch
patch/0203-docker-Fix-for-lack-of-syncromization-in-daemon-update.go.patch
patch/0204-docker-Don-t-fail-on-two-concurrent-reference.store.AddDige.patch
patch/0205-docker-Unexport-testcase.Cleanup-to-fix-Go-1.14.patch
patch/0206-docker-archive-fix-race-condition-in-cmdStream.patch
patch/0207-docker-fix-runc-data-and-dm-left-when-periodically-kill-containerd.patch
patch/0208-docker-fix-ProcessEvent-block-when-CloseStreams-block.patch
patch/0209-docker-check-db-file-size-before-start-containerd.patch
patch/0210-docker-fix-dangling-unpigz.patch
patch/0211-docker-add-timeout-for-IO.Wait.patch
patch/0212-docker-fix-time-Ticker-leak.patch
patch/0213-docker-fix-bug-where-failed-kills-didnt-fallback-to-unix-kill.patch
patch/0214-docker-do-not-check-result-of-issueDiscard.patch
patch/0215-docker-add-info-log-for-pulling-image.patch
patch/0216-docker-Adding-logs-for-debugging-in-docker-stop.patch
patch/0217-docker-add-log-for-easy-debug-in-exit-event-handler.patch
patch/0218-docker-change-log-level-when-containerd-return-conta.patch
patch/0219-docker-Fix-container-exited-after-docker-restart-whe.patch
patch/0220-docker-fix-endpoint-with-name-container_xx-already-e.patch
patch/0221-docker-fix-Up-292-years-in-status-in-docker-ps-a.patch
patch/0222-docker-Use-original-process-spec-for-execs.patch
patch/0223-docker-fix-CVE-2022-24769.patch
patch/0224-fix-rwlayer-umountd-after-container-restart.patch
patch/0225-docker-close-channel-in-write-side-to-avoid-panic-in.patch
patch/0226-docker-chrootarchive-don-t-create-parent-dirs-outside-of-ch.patch
patch/0227-docker-Lock-down-docker-root-dir-perms.patch
patch/0228-docker-registry-ensure-default-auth-config-has-address.patch
patch/0229-docker-fix-terminal-abnormal-after-docker-run.patch
patch/0230-docker-Add-an-ExitPid-field-for-State-struct-to-reco.patch
patch/0231-docker-AdditionalGids-must-include-effective-group-I.patch
#end