cloud-init: backport upstream patches

Reference:fa53c7f408
9c7502a801
2e17a0d626
9f0efc474e
bb414c7866
9cbd94dd57
6d817e94be
0450a1faff
Lv Ying 2023-05-24 01:50:07 +00:00
commit 2fd7c53089
10 changed files with 1311 additions and 1 deletion


@@ -0,0 +1,130 @@
From 5864217bf933927982ea3af2d93c2baccbaa3ba4 Mon Sep 17 00:00:00 2001
From: Andrew Lee <andrew.lee@metaswitch.com>
Date: Thu, 7 Apr 2022 21:52:44 +0100
Subject: [PATCH 3/8] BUG 1473527: module ssh-authkey-fingerprints fails Input/output error… (#1340)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Reference:https://github.com/canonical/cloud-init/commit/fa53c7f4086f5937bc9bd328dba9f91ca73b6614
Conflict:tools/.github-cla-signers is not changed.
Don't error if we cannot log to /dev/console
We've seen instances on VMware where the kernel does not set up the
serial console correctly, leaving /dev/ttyS0 broken and hence
/dev/console not writable.
In such circumstances, cloud-init should not fail; instead, it should
gracefully fall back to logging to stdout.
The only place cloud-init writes to `/dev/console` is the
`multi_log` utility function, which is called by the
ssh-authkey-fingerprints module.
LP: #1473527
---
cloudinit/util.py | 33 +++++++++++++++++++++++++--------
tests/unittests/test_util.py | 27 +++++++++++++++++++++++++++
2 files changed, 52 insertions(+), 8 deletions(-)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index ef1b588..d5e8277 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -359,20 +359,37 @@ def find_modules(root_dir):
return entries
+def write_to_console(conpath, text):
+ with open(conpath, "w") as wfh:
+ wfh.write(text)
+ wfh.flush()
+
+
def multi_log(text, console=True, stderr=True,
log=None, log_level=logging.DEBUG, fallback_to_stdout=True):
if stderr:
sys.stderr.write(text)
if console:
conpath = "/dev/console"
+ writing_to_console_worked = False
if os.path.exists(conpath):
- with open(conpath, 'w') as wfh:
- wfh.write(text)
- wfh.flush()
- elif fallback_to_stdout:
- # A container may lack /dev/console (arguably a container bug). If
- # it does not exist, then write output to stdout. this will result
- # in duplicate stderr and stdout messages if stderr was True.
+ try:
+ write_to_console(conpath, text)
+ writing_to_console_worked = True
+ except OSError:
+ console_error = "Failed to write to /dev/console"
+ sys.stdout.write(f"{console_error}\n")
+ if log:
+ log.log(logging.WARNING, console_error)
+
+ if fallback_to_stdout and not writing_to_console_worked:
+ # A container may lack /dev/console (arguably a container bug).
+ # Additionally, /dev/console may not be writable to on a VM (again
+ # likely a VM bug or virtualization bug).
+ #
+ # If either of these is the case, then write output to stdout.
+ # This will result in duplicate stderr and stdout messages if
+ # stderr was True.
#
# even though upstart or systemd might have set up output to go to
# /dev/console, the user may have configured elsewhere via
@@ -1948,7 +1965,7 @@ def write_file(
omode="wb",
preserve_mode=False,
*,
- ensure_dir_exists=True
+ ensure_dir_exists=True,
):
"""
Writes a file with the given content and sets the file mode as specified.
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index bc30c90..0b01337 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -576,6 +576,33 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
util.multi_log('something', fallback_to_stdout=False)
self.assertEqual('', self.stdout.getvalue())
+ @mock.patch(
+ "cloudinit.util.write_to_console",
+ mock.Mock(side_effect=OSError("Failed to write to console")),
+ )
+ def test_logs_go_to_stdout_if_writing_to_console_fails_and_fallback_true(
+ self,
+ ):
+ self._createConsole(self.root)
+ util.multi_log("something", fallback_to_stdout=True)
+ self.assertEqual(
+ "Failed to write to /dev/console\nsomething",
+ self.stdout.getvalue(),
+ )
+
+ @mock.patch(
+ "cloudinit.util.write_to_console",
+ mock.Mock(side_effect=OSError("Failed to write to console")),
+ )
+ def test_logs_go_nowhere_if_writing_to_console_fails_and_fallback_false(
+ self,
+ ):
+ self._createConsole(self.root)
+ util.multi_log("something", fallback_to_stdout=False)
+ self.assertEqual(
+ "Failed to write to /dev/console\n", self.stdout.getvalue()
+ )
+
def test_logs_go_to_log_if_given(self):
log = mock.MagicMock()
logged_string = 'something very important'
--
2.40.0
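
The shape of the fix, as a minimal standalone sketch (illustrative only, with the cloud-init plumbing omitted; not the exact upstream code):

    import os
    import sys

    def log_to_console_or_stdout(text, log=None):
        """Write to /dev/console, falling back to stdout on any OSError."""
        conpath = "/dev/console"
        wrote_to_console = False
        if os.path.exists(conpath):
            try:
                # The open() can succeed while the write still fails on a
                # broken serial console, so both live inside the try block.
                with open(conpath, "w") as wfh:
                    wfh.write(text)
                    wfh.flush()
                wrote_to_console = True
            except OSError:
                sys.stdout.write("Failed to write to /dev/console\n")
                if log:
                    log.warning("Failed to write to /dev/console")
        if not wrote_to_console:
            sys.stdout.write(text)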


@@ -0,0 +1,332 @@
From a378b7e4f47375458651c0972e7cd813f6fe0a6b Mon Sep 17 00:00:00 2001
From: James Falcon <james.falcon@canonical.com>
Date: Wed, 26 Apr 2023 15:11:55 -0500
Subject: [PATCH] Make user/vendor data sensitive and remove log permissions
(#2144)
Because user data and vendor data may contain sensitive information,
this commit ensures that any user data or vendor data written to
instance-data.json gets redacted and is only available to the root user.
Also, modify the permissions of cloud-init.log to 640 so that
sensitive data leaked into the log isn't world readable.
Additionally, remove the logging of user data and vendor data to
cloud-init.log from the Vultr datasource.
LP: #2013967
CVE: CVE-2023-1786
---
cloudinit/sources/DataSourceLXD.py | 11 +++++-
cloudinit/sources/DataSourceVultr.py | 14 +++----
cloudinit/sources/__init__.py | 35 ++++++++++++++---
cloudinit/sources/tests/test_init.py | 58 ++++++++++++++++++++++++----
cloudinit/stages.py | 4 +-
cloudinit/tests/test_stages.py | 18 +++++----
6 files changed, 109 insertions(+), 31 deletions(-)
diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py
index 732b32f..1e1e9e2 100644
--- a/cloudinit/sources/DataSourceLXD.py
+++ b/cloudinit/sources/DataSourceLXD.py
@@ -14,6 +14,7 @@ import os
import requests
from requests.adapters import HTTPAdapter
+from typing import Tuple
# pylint fails to import the two modules below.
# These are imported via requests.packages rather than urllib3 because:
@@ -173,8 +174,14 @@ class DataSourceLXD(sources.DataSource):
_network_config = sources.UNSET
_crawled_metadata = sources.UNSET
- sensitive_metadata_keys = (
- 'merged_cfg', 'user.meta-data', 'user.vendor-data', 'user.user-data',
+ sensitive_metadata_keys: Tuple[
+ str, ...
+ ] = sources.DataSource.sensitive_metadata_keys + (
+ "user.meta-data",
+ "user.vendor-data",
+ "user.user-data",
+ "cloud-init.user-data",
+ "cloud-init.vendor-data",
)
def _is_platform_viable(self) -> bool:
diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py
index 68e1ff0..4d41d4e 100644
--- a/cloudinit/sources/DataSourceVultr.py
+++ b/cloudinit/sources/DataSourceVultr.py
@@ -10,6 +10,8 @@ from cloudinit import sources
from cloudinit import util
from cloudinit import version
+from typing import Tuple
+
import cloudinit.sources.helpers.vultr as vultr
LOG = log.getLogger(__name__)
@@ -29,6 +31,10 @@ class DataSourceVultr(sources.DataSource):
dsname = 'Vultr'
+ sensitive_metadata_keys: Tuple[
+ str, ...
+ ] = sources.DataSource.sensitive_metadata_keys + ("startup-script",)
+
def __init__(self, sys_cfg, distro, paths):
super(DataSourceVultr, self).__init__(sys_cfg, distro, paths)
self.ds_cfg = util.mergemanydict([
@@ -54,13 +60,8 @@ class DataSourceVultr(sources.DataSource):
self.get_datasource_data(self.metadata)
# Dump some data so diagnosing failures is manageable
- LOG.debug("Vultr Vendor Config:")
- LOG.debug(util.json_dumps(self.metadata['vendor-data']))
LOG.debug("SUBID: %s", self.metadata['instance-id'])
LOG.debug("Hostname: %s", self.metadata['local-hostname'])
- if self.userdata_raw is not None:
- LOG.debug("User-Data:")
- LOG.debug(self.userdata_raw)
return True
@@ -141,7 +142,4 @@ if __name__ == "__main__":
config = md['vendor-data']
sysinfo = vultr.get_sysinfo()
- print(util.json_dumps(sysinfo))
- print(util.json_dumps(config))
-
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index f2f2343..20cc397 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -13,7 +13,7 @@ import copy
import json
import os
from collections import namedtuple
-from typing import Dict, List # noqa: F401
+from typing import Dict, List, Tuple # noqa: F401
from cloudinit import dmi
from cloudinit import importer
@@ -103,7 +103,10 @@ def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
sub_key_path = key_path + '/' + key
else:
sub_key_path = key
- if key in sensitive_keys or sub_key_path in sensitive_keys:
+ if (
+ key.lower() in sensitive_keys
+ or sub_key_path.lower() in sensitive_keys
+ ):
sens_keys.append(sub_key_path)
if isinstance(val, str) and val.startswith('ci-b64:'):
base64_encoded_keys.append(sub_key_path)
@@ -124,6 +127,12 @@ def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
Replace any keys values listed in 'sensitive_keys' with redact_value.
"""
+ # While 'sensitive_keys' should already be sanitized to only include what
+ # is in metadata, it is possible keys will overlap. For example, if
+ # "merged_cfg" and "merged_cfg/ds/userdata" both match, it's possible that
+ # "merged_cfg" will get replaced first, meaning "merged_cfg/ds/userdata"
+ # no longer represents a valid key.
+ # Thus, we still need to do membership checks in this function.
if not metadata.get('sensitive_keys', []):
return metadata
md_copy = copy.deepcopy(metadata)
@@ -131,9 +140,14 @@ def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
path_parts = key_path.split('/')
obj = md_copy
for path in path_parts:
- if isinstance(obj[path], dict) and path != path_parts[-1]:
+ if (
+ path in obj
+ and isinstance(obj[path], dict)
+ and path != path_parts[-1]
+ ):
obj = obj[path]
- obj[path] = redact_value
+ if path in obj:
+ obj[path] = redact_value
return md_copy
@@ -215,7 +229,18 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
# N-tuple of keypaths or keynames redact from instance-data.json for
# non-root users
- sensitive_metadata_keys = ('merged_cfg', 'security-credentials',)
+ sensitive_metadata_keys: Tuple[str, ...] = (
+ "merged_cfg",
+ "security-credentials",
+ "userdata",
+ "user-data",
+ "user_data",
+ "vendordata",
+ "vendor-data",
+ # Provide ds/vendor_data to avoid redacting top-level
+ # "vendor_data": {enabled: True}
+ "ds/vendor_data",
+ )
_ci_pkl_version = 1
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index ae09cb1..bce2ab5 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -353,11 +353,32 @@ class TestDataSource(CiTestCase):
'availability_zone': 'myaz',
'local-hostname': 'test-subclass-hostname',
'region': 'myregion',
- 'some': {'security-credentials': {
- 'cred1': 'sekret', 'cred2': 'othersekret'}}})
+ 'some': {
+ 'security-credentials': {
+ 'cred1': 'sekret', 'cred2': 'othersekret'
+ }
+ },
+ "someother": {
+ "nested": {
+ "userData": "HIDE ME",
+ }
+ },
+ "VENDOR-DAta": "HIDE ME TOO",
+ },
+ )
self.assertCountEqual(
- ('merged_cfg', 'security-credentials',),
- datasource.sensitive_metadata_keys)
+ (
+ "merged_cfg",
+ "security-credentials",
+ "userdata",
+ "user-data",
+ "user_data",
+ "vendordata",
+ "vendor-data",
+ "ds/vendor_data",
+ ),
+ datasource.sensitive_metadata_keys,
+ )
sys_info = {
"python": "3.7",
"platform":
@@ -373,7 +394,11 @@ class TestDataSource(CiTestCase):
'base64_encoded_keys': [],
'merged_cfg': REDACT_SENSITIVE_VALUE,
'sensitive_keys': [
- 'ds/meta_data/some/security-credentials', 'merged_cfg'],
+ "ds/meta_data/VENDOR-DAta",
+ "ds/meta_data/some/security-credentials",
+ "ds/meta_data/someother/nested/userData",
+ "merged_cfg",
+ ],
'sys_info': sys_info,
'v1': {
'_beta_keys': ['subplatform'],
@@ -381,6 +406,7 @@ class TestDataSource(CiTestCase):
'availability_zone': 'myaz',
'cloud-name': 'subclasscloudname',
'cloud_name': 'subclasscloudname',
+ "cloud_id": "subclasscloudname",
'distro': 'ubuntu',
'distro_release': 'focal',
'distro_version': '20.04',
@@ -401,10 +427,16 @@ class TestDataSource(CiTestCase):
'ds': {
'_doc': EXPERIMENTAL_TEXT,
'meta_data': {
+ "VENDOR-DAta": REDACT_SENSITIVE_VALUE,
'availability_zone': 'myaz',
'local-hostname': 'test-subclass-hostname',
'region': 'myregion',
- 'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}}
+ 'some': {'security-credentials': REDACT_SENSITIVE_VALUE},
+ "someother": {
+ "nested": {"userData": REDACT_SENSITIVE_VALUE}
+ },
+ },
+ },
}
self.assertCountEqual(expected, redacted)
file_stat = os.stat(json_file)
@@ -432,8 +464,18 @@ class TestDataSource(CiTestCase):
"variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
self.assertCountEqual(
- ('merged_cfg', 'security-credentials',),
- datasource.sensitive_metadata_keys)
+ (
+ "merged_cfg",
+ "security-credentials",
+ "userdata",
+ "user-data",
+ "user_data",
+ "vendordata",
+ "vendor-data",
+ "ds/vendor_data",
+ ),
+ datasource.sensitive_metadata_keys,
+ )
with mock.patch("cloudinit.util.system_info", return_value=sys_info):
datasource.get_data()
sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 731b298..59b0925 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -204,7 +204,9 @@ class Init(object):
util.ensure_dirs(self._initial_subdirs())
log_file = util.get_cfg_option_str(self.cfg, 'def_log_file')
if log_file:
- util.ensure_file(log_file, mode=0o640, preserve_mode=True)
+ # At this point the log file should have already been created
+ # in the setupLogging function of log.py
+ util.ensure_file(log_file, mode=0o640, preserve_mode=False)
perms = self.cfg.get('syslog_fix_perms')
if not perms:
perms = {}
diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py
index a50836a..aeab17a 100644
--- a/cloudinit/tests/test_stages.py
+++ b/cloudinit/tests/test_stages.py
@@ -458,21 +458,25 @@ class TestInit_InitializeFilesystem:
# Assert we create it 0o640 by default if it doesn't already exist
assert 0o640 == stat.S_IMODE(log_file.stat().mode)
- def test_existing_file_permissions_are_not_modified(self, init, tmpdir):
- """If the log file already exists, we should not modify its permissions
+ def test_existing_file_permissions(self, init, tmpdir):
+ """Test file permissions are set as expected.
+
+ CIS Hardening requires 640 permissions. These permissions are
+ currently hardcoded on every boot, but if there's ever a reason
+ to change this, we need to then ensure that they
+ are *not* set every boot.
See https://bugs.launchpad.net/cloud-init/+bug/1900837.
"""
- # Use a mode that will never be made the default so this test will
- # always be valid
- mode = 0o606
log_file = tmpdir.join("cloud-init.log")
log_file.ensure()
- log_file.chmod(mode)
+ # Use a mode that will never be made the default so this test will
+ # always be valid
+ log_file.chmod(0o606)
init._cfg = {"def_log_file": str(log_file)}
init._initialize_filesystem()
- assert mode == stat.S_IMODE(log_file.stat().mode)
+ assert 0o640 == stat.S_IMODE(log_file.stat().mode)
# vi: ts=4 expandtab
--
2.33.0
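
Why the new membership checks in redact_sensitive_keys() are needed can be seen with a toy re-implementation of its key-path walk (a standalone sketch, not the cloud-init API):

    import copy

    def redact(metadata, sensitive_paths, redact_value="redacted"):
        md_copy = copy.deepcopy(metadata)
        for key_path in sensitive_paths:
            path_parts = key_path.split("/")
            obj = md_copy
            for path in path_parts:
                if (
                    path in obj
                    and isinstance(obj[path], dict)
                    and path != path_parts[-1]
                ):
                    obj = obj[path]
            if path in obj:  # the leaf may already have been redacted away
                obj[path] = redact_value
        return md_copy

    data = {"merged_cfg": {"ds": {"userdata": "s3cret"}}}
    # "merged_cfg" is redacted first, so "merged_cfg/ds/userdata" no longer
    # resolves; without the membership checks this walk would raise KeyError.
    print(redact(data, ["merged_cfg", "merged_cfg/ds/userdata"]))
    # -> {'merged_cfg': 'redacted'}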


@@ -0,0 +1,138 @@
From 369fcfa32a0448463e1593269a25000f94d9a23d Mon Sep 17 00:00:00 2001
From: Andrew Kutz <101085+akutz@users.noreply.github.com>
Date: Mon, 22 Aug 2022 14:08:39 -0500
Subject: [PATCH 5/8] DataSourceVMware: fix var use before init (#1674)
Reference:https://github.com/canonical/cloud-init/commit/9f0efc474ea430c75cd0abec3e2da719d4934346
Conflict:changes applied to tests/unittests/test_datasource/test_vmware.py rather than tests/unittests/sources/test_vmware.py.
This patch fixes an issue in the DataSourceVMware code where the
variable ipv6_ready was used in a logging statement before it was
initialized. The variable is now initialized up front, avoiding the crash.
LP: #1987005
---
cloudinit/sources/DataSourceVMware.py | 7 +-
.../unittests/test_datasource/test_vmware.py | 74 +++++++++++++++++++
2 files changed, 79 insertions(+), 2 deletions(-)
diff --git a/cloudinit/sources/DataSourceVMware.py b/cloudinit/sources/DataSourceVMware.py
index 22ca63d..197c926 100644
--- a/cloudinit/sources/DataSourceVMware.py
+++ b/cloudinit/sources/DataSourceVMware.py
@@ -812,7 +812,7 @@ def wait_on_network(metadata):
wait_on_ipv6 = util.translate_bool(wait_on_ipv6_val)
# Get information about the host.
- host_info = None
+ host_info, ipv4_ready, ipv6_ready = None, False, False
while host_info is None:
# This loop + sleep results in two logs every second while waiting
# for either ipv4 or ipv6 up. Do we really need to log each iteration
@@ -857,7 +857,10 @@ def main():
except Exception:
pass
metadata = {
- "wait-on-network": {"ipv4": True, "ipv6": "false"},
+ WAIT_ON_NETWORK: {
+ WAIT_ON_NETWORK_IPV4: True,
+ WAIT_ON_NETWORK_IPV6: False,
+ },
"network": {"config": {"dhcp": True}},
}
host_info = wait_on_network(metadata)
diff --git a/tests/unittests/test_datasource/test_vmware.py b/tests/unittests/test_datasource/test_vmware.py
index 52f910b..35be74b 100644
--- a/tests/unittests/test_datasource/test_vmware.py
+++ b/tests/unittests/test_datasource/test_vmware.py
@@ -75,6 +75,8 @@ class TestDataSourceVMware(CiTestCase):
Test common functionality that is not transport specific.
"""
+ with_logs = True
+
def setUp(self):
super(TestDataSourceVMware, self).setUp()
self.tmp = self.tmp_dir()
@@ -93,6 +95,78 @@ class TestDataSourceVMware(CiTestCase):
self.assertTrue(host_info["local_hostname"])
self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4])
+ @mock.patch("cloudinit.sources.DataSourceVMware.get_host_info")
+ def test_wait_on_network(self, m_fn):
+ metadata = {
+ DataSourceVMware.WAIT_ON_NETWORK: {
+ DataSourceVMware.WAIT_ON_NETWORK_IPV4: True,
+ DataSourceVMware.WAIT_ON_NETWORK_IPV6: False,
+ },
+ }
+ m_fn.side_effect = [
+ {
+ "hostname": "host.cloudinit.test",
+ "local-hostname": "host.cloudinit.test",
+ "local_hostname": "host.cloudinit.test",
+ "network": {
+ "interfaces": {
+ "by-ipv4": {},
+ "by-ipv6": {},
+ "by-mac": {
+ "aa:bb:cc:dd:ee:ff": {"ipv4": [], "ipv6": []}
+ },
+ },
+ },
+ },
+ {
+ "hostname": "host.cloudinit.test",
+ "local-hostname": "host.cloudinit.test",
+ "local-ipv4": "10.10.10.1",
+ "local_hostname": "host.cloudinit.test",
+ "network": {
+ "interfaces": {
+ "by-ipv4": {
+ "10.10.10.1": {
+ "mac": "aa:bb:cc:dd:ee:ff",
+ "netmask": "255.255.255.0",
+ }
+ },
+ "by-mac": {
+ "aa:bb:cc:dd:ee:ff": {
+ "ipv4": [
+ {
+ "addr": "10.10.10.1",
+ "broadcast": "10.10.10.255",
+ "netmask": "255.255.255.0",
+ }
+ ],
+ "ipv6": [],
+ }
+ },
+ },
+ },
+ },
+ ]
+
+ host_info = DataSourceVMware.wait_on_network(metadata)
+
+ logs = self.logs.getvalue()
+ expected_logs = [
+ "DEBUG: waiting on network: wait4=True, "
+ + "ready4=False, wait6=False, ready6=False\n",
+ "DEBUG: waiting on network complete\n",
+ ]
+ for log in expected_logs:
+ self.assertIn(log, logs)
+
+ self.assertTrue(host_info)
+ self.assertTrue(host_info["hostname"])
+ self.assertTrue(host_info["hostname"] == "host.cloudinit.test")
+ self.assertTrue(host_info["local-hostname"])
+ self.assertTrue(host_info["local_hostname"])
+ self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4])
+ self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4] == "10.10.10.1")
+
class TestDataSourceVMwareEnvVars(FilesystemMockingTestCase):
"""
--
2.40.0
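
The underlying failure mode is plain Python scoping; a minimal standalone reproduction (a hypothetical simplification, not the VMware datasource code):

    def wait_on_network_buggy():
        host_info = None
        while host_info is None:
            # ipv6_ready is assigned further down, which makes it a local
            # variable for the whole function, so reading it here on the
            # first iteration raises UnboundLocalError.
            print("waiting on network: ready6=%s" % ipv6_ready)
            ipv6_ready = True
            host_info = {"up": True}
        return host_info

    # The fix is the one-line initialization before the loop:
    #     host_info, ipv4_ready, ipv6_ready = None, False, False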


@@ -0,0 +1,177 @@
From fadb6489cfbc14c67ebcd9b34a032ad574a3d529 Mon Sep 17 00:00:00 2001
From: Brett Holman <brett.holman@canonical.com>
Date: Wed, 13 Jul 2022 13:05:46 -0600
Subject: [PATCH 4/8] Resource leak cleanup (#1556)
Reference:https://github.com/canonical/cloud-init/commit/9cbd94dd57112083856ead0e0ff724e9d1c1f714
Conflict:test file locations differ from upstream.
Add a tox target for tracing resource leaks, and fix some leaks
---
cloudinit/analyze/__main__.py | 13 +++++++++++++
cloudinit/analyze/tests/test_boot.py | 24 +++++++++++++-----------
cloudinit/analyze/tests/test_dump.py | 4 ++--
cloudinit/cmd/cloud_id.py | 3 ++-
tests/unittests/test_util.py | 3 ++-
tox.ini | 9 +++++++++
6 files changed, 41 insertions(+), 15 deletions(-)
diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
index 99e5c20..4ec609c 100644
--- a/cloudinit/analyze/__main__.py
+++ b/cloudinit/analyze/__main__.py
@@ -8,6 +8,7 @@ import sys
from cloudinit.util import json_dumps
from datetime import datetime
+from typing import IO
from . import dump
from . import show
@@ -136,6 +137,7 @@ def analyze_boot(name, args):
}
outfh.write(status_map[status_code].format(**kwargs))
+ clean_io(infh, outfh)
return status_code
@@ -161,6 +163,7 @@ def analyze_blame(name, args):
outfh.write('\n'.join(srecs) + '\n')
outfh.write('\n')
outfh.write('%d boot records analyzed\n' % (idx + 1))
+ clean_io(infh, outfh)
def analyze_show(name, args):
@@ -193,12 +196,14 @@ def analyze_show(name, args):
'character.\n\n')
outfh.write('\n'.join(record) + '\n')
outfh.write('%d boot records analyzed\n' % (idx + 1))
+ clean_io(infh, outfh)
def analyze_dump(name, args):
"""Dump cloud-init events in json format"""
(infh, outfh) = configure_io(args)
outfh.write(json_dumps(_get_events(infh)) + '\n')
+ clean_io(infh, outfh)
def _get_events(infile):
@@ -232,6 +237,14 @@ def configure_io(args):
return (infh, outfh)
+def clean_io(*file_handles: IO) -> None:
+ """close filehandles"""
+ for file_handle in file_handles:
+ if file_handle in (sys.stdin, sys.stdout):
+ continue
+ file_handle.close()
+
+
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
diff --git a/cloudinit/analyze/tests/test_boot.py b/cloudinit/analyze/tests/test_boot.py
index f69423c..6676676 100644
--- a/cloudinit/analyze/tests/test_boot.py
+++ b/cloudinit/analyze/tests/test_boot.py
@@ -117,17 +117,19 @@ class TestAnalyzeBoot(CiTestCase):
analyze_boot(name_default, args)
# now args have been tested, go into outfile and make sure error
# message is in the outfile
- outfh = open(args.outfile, 'r')
- data = outfh.read()
- err_string = 'Your Linux distro or container does not support this ' \
- 'functionality.\nYou must be running a Kernel ' \
- 'Telemetry supported distro.\nPlease check ' \
- 'https://cloudinit.readthedocs.io/en/latest/topics' \
- '/analyze.html for more information on supported ' \
- 'distros.\n'
-
- self.remove_dummy_file(path, log_path)
- self.assertEqual(err_string, data)
+ with open(args.outfile, "r") as outfh:
+ data = outfh.read()
+ err_string = (
+ "Your Linux distro or container does not support this "
+ "functionality.\nYou must be running a Kernel "
+ "Telemetry supported distro.\nPlease check "
+ "https://cloudinit.readthedocs.io/en/latest/topics"
+ "/analyze.html for more information on supported "
+ "distros.\n"
+ )
+
+ self.remove_dummy_file(path, log_path)
+ self.assertEqual(err_string, data)
@mock.patch("cloudinit.util.is_container", return_value=True)
@mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None))
diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py
index dac1efb..27db1b1 100644
--- a/cloudinit/analyze/tests/test_dump.py
+++ b/cloudinit/analyze/tests/test_dump.py
@@ -184,8 +184,8 @@ class TestDumpEvents(CiTestCase):
tmpfile = self.tmp_path('logfile')
write_file(tmpfile, SAMPLE_LOGS)
m_parse_from_date.return_value = 1472594005.972
-
- events, data = dump_events(cisource=open(tmpfile))
+ with open(tmpfile) as file:
+ events, data = dump_events(cisource=file)
year = datetime.now().year
dt1 = datetime.strptime(
'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y')
diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py
index 9760892..985f9a2 100755
--- a/cloudinit/cmd/cloud_id.py
+++ b/cloudinit/cmd/cloud_id.py
@@ -53,7 +53,8 @@ def handle_args(name, args):
@return: 0 on success, 1 otherwise.
"""
try:
- instance_data = json.load(open(args.instance_data))
+ with open(args.instance_data) as file:
+ instance_data = json.load(file)
except IOError:
return error(
"File not found '%s'. Provide a path to instance data json file"
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 0b01337..1185487 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -560,7 +560,8 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
self._createConsole(self.root)
logged_string = 'something very important'
util.multi_log(logged_string)
- self.assertEqual(logged_string, open('/dev/console').read())
+ with open("/dev/console") as f:
+ self.assertEqual(logged_string, f.read())
def test_logs_dont_go_to_stdout_if_console_exists(self):
self._createConsole(self.root)
diff --git a/tox.ini b/tox.ini
index 874d3f2..5360067 100644
--- a/tox.ini
+++ b/tox.ini
@@ -60,6 +60,15 @@ commands =
{envpython} -m sphinx {posargs:doc/rtd doc/rtd_html}
doc8 doc/rtd
+#commands = {envpython} -X tracemalloc=40 -Werror::ResourceWarning:cloudinit -m pytest \
+[testenv:py3-leak]
+deps = {[testenv:py3]deps}
+commands = {envpython} -X tracemalloc=40 -Wall -m pytest \
+    --durations 10 \
+    {posargs:--cov=cloudinit --cov-branch \
+    tests/unittests}
+
+
[xenial-shared-deps]
# The version of pytest in xenial doesn't work with Python 3.8, so we define
# two xenial environments: [testenv:xenial] runs the tests with exactly the
--
2.40.0
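
The leaks fixed here are the classic open()-without-close pattern. A standalone sketch (standard library only) of how a leak-checking run like the new py3-leak target surfaces them:

    import tracemalloc
    import warnings

    tracemalloc.start(40)  # keep 40 frames, as -X tracemalloc=40 does
    warnings.simplefilter("error", ResourceWarning)  # fail loudly on leaks

    def leaky(path):
        return open(path).read()  # file object is never explicitly closed

    def fixed(path):
        with open(path) as f:  # context manager guarantees close()
            return f.read()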


@@ -0,0 +1,70 @@
From aacf03969de361c50c6add15cf665335dc593a36 Mon Sep 17 00:00:00 2001
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Date: Wed, 18 Jan 2023 17:55:16 +0100
Subject: [PATCH 6/8] cc_set_hostname: ignore /var/lib/cloud/data/set-hostname
if it's empty (#1967)
Reference:https://github.com/canonical/cloud-init/commit/9c7502a801763520639c66125eb373123d1e4f44
Conflict:changes applied to tests/unittests/test_handler/test_handler_set_hostname.py rather than tests/unittests/config/test_cc_set_hostname.py.
If the file exists but is empty, do nothing.
Otherwise cloud-init will crash because it does not handle the empty file.
RHBZ: 2140893
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
cloudinit/config/cc_set_hostname.py | 2 +-
.../test_handler/test_handler_set_hostname.py | 18 ++++++++++++++++++
2 files changed, 19 insertions(+), 1 deletion(-)
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index a96bcc1..5fb8b75 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -84,7 +84,7 @@ def handle(name, cfg, cloud, log, _args):
# distro._read_hostname implementation so we only validate one artifact.
prev_fn = os.path.join(cloud.get_cpath('data'), "set-hostname")
prev_hostname = {}
- if os.path.exists(prev_fn):
+ if os.path.exists(prev_fn) and os.stat(prev_fn).st_size > 0:
prev_hostname = util.load_json(util.load_file(prev_fn))
hostname_changed = (hostname != prev_hostname.get('hostname') or
fqdn != prev_hostname.get('fqdn'))
diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py
index 1a524c7..0ed9e03 100644
--- a/tests/unittests/test_handler/test_handler_set_hostname.py
+++ b/tests/unittests/test_handler/test_handler_set_hostname.py
@@ -15,6 +15,7 @@ import os
import shutil
import tempfile
from io import BytesIO
+from pathlib import Path
from unittest import mock
LOG = logging.getLogger(__name__)
@@ -204,4 +205,21 @@ class TestHostname(t_help.FilesystemMockingTestCase):
' OOPS on: hostname1.me.com',
str(ctx_mgr.exception))
+ def test_ignore_empty_previous_artifact_file(self):
+ cfg = {
+ "hostname": "blah",
+ "fqdn": "blah.blah.blah.yahoo.com",
+ }
+ distro = self._fetch_distro("debian")
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ prev_fn = Path(cc.get_cpath("data")) / "set-hostname"
+ prev_fn.touch()
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ contents = util.load_file("/etc/hostname")
+ self.assertEqual("blah", contents.strip())
+
+
# vi: ts=4 expandtab
--
2.40.0
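
The crash being guarded against is simply that JSON parsing rejects an empty document; a minimal illustration:

    import json

    try:
        json.loads("")  # what loading an empty set-hostname file boils down to
    except json.JSONDecodeError as e:
        print(e)  # Expecting value: line 1 column 1 (char 0)

    # Hence the patched guard: parse the artifact only when it is non-empty.
    #     if os.path.exists(prev_fn) and os.stat(prev_fn).st_size > 0:
    #         prev_hostname = util.load_json(util.load_file(prev_fn))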


@@ -0,0 +1,100 @@
From 7e59cb3d7536d847a5138fe3702909c7af0812de Mon Sep 17 00:00:00 2001
From: Shreenidhi Shedi <53473811+sshedi@users.noreply.github.com>
Date: Fri, 25 Feb 2022 05:04:54 +0530
Subject: [PATCH 2/8] check for existing symlink while force creating symlink
(#1281)
Reference:https://github.com/canonical/cloud-init/commit/2e17a0d626d41147b7d0822013e80179b3a81ee4
Conflict:(1) changes applied to cloudinit/tests/test_util.py rather than tests/unittests/test_util.py;
(2) "import os" added to the test module.
If a dead symlink by the same name is present, os.path.exists() returns
False; use os.path.lexists() instead.
Signed-off-by: Shreenidhi Shedi <sshedi@vmware.com>
---
cloudinit/tests/test_util.py | 47 ++++++++++++++++++++++++++++++++++++
cloudinit/util.py | 2 +-
2 files changed, 48 insertions(+), 1 deletion(-)
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index ab5eb35..671b8bc 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -7,6 +7,7 @@ import logging
import json
import platform
import pytest
+import os
import cloudinit.util as util
from cloudinit import subp
@@ -312,6 +313,52 @@ class TestUtil(CiTestCase):
self.assertEqual(is_rw, False)
+class TestSymlink(CiTestCase):
+ def test_sym_link_simple(self):
+ tmpd = self.tmp_dir()
+ link = self.tmp_path("link", tmpd)
+ target = self.tmp_path("target", tmpd)
+ util.write_file(target, "hello")
+
+ util.sym_link(target, link)
+ self.assertTrue(os.path.exists(link))
+ self.assertTrue(os.path.islink(link))
+
+ def test_sym_link_source_exists(self):
+ tmpd = self.tmp_dir()
+ link = self.tmp_path("link", tmpd)
+ target = self.tmp_path("target", tmpd)
+ util.write_file(target, "hello")
+
+ util.sym_link(target, link)
+ self.assertTrue(os.path.exists(link))
+
+ util.sym_link(target, link, force=True)
+ self.assertTrue(os.path.exists(link))
+
+ def test_sym_link_dangling_link(self):
+ tmpd = self.tmp_dir()
+ link = self.tmp_path("link", tmpd)
+ target = self.tmp_path("target", tmpd)
+
+ util.sym_link(target, link)
+ self.assertTrue(os.path.islink(link))
+ self.assertFalse(os.path.exists(link))
+
+ util.sym_link(target, link, force=True)
+ self.assertTrue(os.path.islink(link))
+ self.assertFalse(os.path.exists(link))
+
+ def test_sym_link_create_dangling(self):
+ tmpd = self.tmp_dir()
+ link = self.tmp_path("link", tmpd)
+ target = self.tmp_path("target", tmpd)
+
+ util.sym_link(target, link)
+ self.assertTrue(os.path.islink(link))
+ self.assertFalse(os.path.exists(link))
+
+
class TestUptime(CiTestCase):
@mock.patch('cloudinit.util.boottime')
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 68c12f9..ef1b588 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1779,7 +1779,7 @@ def is_link(path):
def sym_link(source, link, force=False):
LOG.debug("Creating symbolic link from %r => %r", link, source)
- if force and os.path.exists(link):
+ if force and os.path.lexists(link):
del_file(link)
os.symlink(source, link)
--
2.40.0
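
The behavioral difference the fix relies on, as a runnable standalone snippet:

    import os
    import tempfile

    d = tempfile.mkdtemp()
    link = os.path.join(d, "link")
    os.symlink(os.path.join(d, "missing-target"), link)  # dangling symlink

    print(os.path.exists(link))   # False: follows the link; target is absent
    print(os.path.lexists(link))  # True: inspects the link itself

    # With os.path.exists(), force-recreating over a dangling link skipped
    # del_file(), and the following os.symlink() raised FileExistsError.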


@@ -0,0 +1,80 @@
From 8cac3e2424a8d10dd1e0705c8558b71f7e7c37db Mon Sep 17 00:00:00 2001
From: Stefan Prietl <ederst@users.noreply.github.com>
Date: Mon, 13 Feb 2023 19:26:23 +0100
Subject: [PATCH 7/8] disk_setup: use byte string when purging the partition
table (#2012)
Reference:https://github.com/canonical/cloud-init/commit/bb414c7866c4728b2105e84f7b426ab81cc4bf4d
Conflict:changes applied to tests/unittests/test_handler/test_handler_disk_setup.py
rather than tests/unittests/config/test_cc_disk_setup.py.
This writes a byte string to the device instead of a string when
purging the partition table.
This prevents the TypeError "a bytes-like object is required,
not 'str'" from occurring.
---
cloudinit/config/cc_disk_setup.py | 2 +-
.../test_handler/test_handler_disk_setup.py | 19 ++++++++++++++++++-
2 files changed, 19 insertions(+), 2 deletions(-)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 440f05f..abdc111 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -649,7 +649,7 @@ def get_partition_gpt_layout(size, layout):
def purge_disk_ptable(device):
# wipe the first and last megabyte of a disk (or file)
# gpt stores partition table both at front and at end.
- null = '\0'
+ null = b'\0'
start_len = 1024 * 1024
end_len = 1024 * 1024
with open(device, "rb+") as fp:
diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py
index 4f4a57f..2f3a8df 100644
--- a/tests/unittests/test_handler/test_handler_disk_setup.py
+++ b/tests/unittests/test_handler/test_handler_disk_setup.py
@@ -1,6 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import random
+import tempfile
from cloudinit.config import cc_disk_setup
from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, TestCase
@@ -168,6 +169,23 @@ class TestUpdateFsSetupDevices(TestCase):
}, fs_setup)
+class TestPurgeDisk(TestCase):
+ @mock.patch(
+ "cloudinit.config.cc_disk_setup.read_parttbl", return_value=None
+ )
+ def test_purge_disk_ptable(self, *args):
+ pseudo_device = tempfile.NamedTemporaryFile()
+
+ cc_disk_setup.purge_disk_ptable(pseudo_device.name)
+
+ with pseudo_device as f:
+ actual = f.read()
+
+ expected = b"\0" * (1024 * 1024)
+
+ self.assertEqual(expected, actual)
+
+
@mock.patch('cloudinit.config.cc_disk_setup.assert_and_settle_device',
return_value=None)
@mock.patch('cloudinit.config.cc_disk_setup.find_device_node',
@@ -175,7 +193,6 @@ class TestUpdateFsSetupDevices(TestCase):
@mock.patch('cloudinit.config.cc_disk_setup.device_type', return_value=None)
@mock.patch('cloudinit.config.cc_disk_setup.subp.subp', return_value=('', ''))
class TestMkfsCommandHandling(CiTestCase):
-
with_logs = True
def test_with_cmd(self, subp, *args):
--
2.40.0
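
The error in question comes straight from Python's binary file API; a standalone demonstration:

    import tempfile

    with tempfile.NamedTemporaryFile() as fp:  # opened in binary mode ("w+b")
        try:
            fp.write("\0" * 16)  # old code: str into a binary stream
        except TypeError as e:
            print(e)  # a bytes-like object is required, not 'str'
        fp.write(b"\0" * 16)  # patched form: bytes, as the mode requires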


@@ -0,0 +1,194 @@
From 19300c5cc6161949e28026876e9fe407d647b2c0 Mon Sep 17 00:00:00 2001
From: Chris Patterson <cpatterson@microsoft.com>
Date: Fri, 4 Feb 2022 14:17:38 -0500
Subject: [PATCH 1/8] sources/azure: fix metadata check in
_check_if_nic_is_primary() (#1232)
Reference:https://github.com/canonical/cloud-init/commit/6d817e94beb404d3917bf973bcb728aa6cc22ffe
Conflict:changes applied to tests/unittests/test_datasource/test_azure.py rather than tests/unittests/sources/test_azure.py.
Currently _check_if_nic_is_primary() checks whether imds_md is None,
but imds_md is returned as an empty dictionary when fetching
metadata fails.
Fix this check and the tests that are incorrectly vetting IMDS
polling code.
Additionally, use response.contents instead of str(response) when
loading the JSON data returned from readurl. This simplifies the
mocking and avoids an unnecessary conversion.
Signed-off-by: Chris Patterson <cpatterson@microsoft.com>
---
cloudinit/sources/DataSourceAzure.py | 6 +-
tests/unittests/test_datasource/test_azure.py | 82 ++++++-------------
2 files changed, 27 insertions(+), 61 deletions(-)
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 93493fa..f1e6642 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -1078,10 +1078,10 @@ class DataSourceAzure(sources.DataSource):
"primary nic.", ifname, e)
finally:
# If we are not the primary nic, then clean the dhcp context.
- if imds_md is None:
+ if not imds_md:
dhcp_ctx.clean_network()
- if imds_md is not None:
+ if imds_md:
# Only primary NIC will get a response from IMDS.
LOG.info("%s is the primary nic", ifname)
is_primary = True
@@ -2337,7 +2337,7 @@ def _get_metadata_from_imds(
json_decode_error = ValueError
try:
- return util.load_json(str(response))
+ return util.load_json(response.contents)
except json_decode_error as e:
report_diagnostic_event(
'Ignoring non-json IMDS instance metadata response: %s. '
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index cbc9665..62e657b 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -2853,16 +2853,6 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
'interface': 'eth9', 'fixed-address': '192.168.2.9',
'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
'unknown-245': '624c3620'}
- m_attach_call_count = 0
-
- def nic_attach_ret(nl_sock, nics_found):
- nonlocal m_attach_call_count
- m_attach_call_count = m_attach_call_count + 1
- if m_attach_call_count == 1:
- return "eth0"
- elif m_attach_call_count == 2:
- return "eth1"
- raise RuntimeError("Must have found primary nic by now.")
# Simulate two NICs by adding the same one twice.
md = {
@@ -2872,17 +2862,15 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
]
}
- def network_metadata_ret(ifname, retries, type, exc_cb, infinite):
- if ifname == "eth0":
- return md
- raise requests.Timeout('Fake connection timeout')
-
m_isfile.return_value = True
- m_attach.side_effect = nic_attach_ret
+ m_attach.side_effect = [
+ "eth0",
+ "eth1",
+ ]
dhcp_ctx = mock.MagicMock(lease=lease)
dhcp_ctx.obtain_lease.return_value = lease
m_dhcpv4.return_value = dhcp_ctx
- m_imds.side_effect = network_metadata_ret
+ m_imds.side_effect = [md]
m_fallback_if.return_value = None
dsa._wait_for_all_nics_ready()
@@ -2894,10 +2882,11 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
self.assertEqual(1, m_imds.call_count)
self.assertEqual(2, m_link_up.call_count)
- @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback')
+ @mock.patch("cloudinit.url_helper.time.sleep", autospec=True)
+ @mock.patch("requests.Session.request", autospec=True)
@mock.patch(MOCKPATH + 'EphemeralDHCPv4')
def test_check_if_nic_is_primary_retries_on_failures(
- self, m_dhcpv4, m_imds):
+ self, m_dhcpv4, m_request, m_sleep):
"""Retry polling for network metadata on all failures except timeout
and network unreachable errors"""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
@@ -2906,8 +2895,6 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
'unknown-245': '624c3620'}
- eth0Retries = []
- eth1Retries = []
# Simulate two NICs by adding the same one twice.
md = {
"interface": [
@@ -2916,55 +2903,34 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
]
}
- def network_metadata_ret(ifname, retries, type, exc_cb, infinite):
- nonlocal eth0Retries, eth1Retries
-
- # Simulate readurl functionality with retries and
- # exception callbacks so that the callback logic can be
- # validated.
- if ifname == "eth0":
- cause = requests.HTTPError()
- for _ in range(0, 15):
- error = url_helper.UrlError(cause=cause, code=410)
- eth0Retries.append(exc_cb("No goal state.", error))
- else:
- for _ in range(0, 10):
- # We are expected to retry for a certain period for both
- # timeout errors and network unreachable errors.
- if _ < 5:
- cause = requests.Timeout('Fake connection timeout')
- else:
- cause = requests.ConnectionError('Network Unreachable')
- error = url_helper.UrlError(cause=cause)
- eth1Retries.append(exc_cb("Connection timeout", error))
- # Should stop retrying after 10 retries
- eth1Retries.append(exc_cb("Connection timeout", error))
- raise cause
- return md
-
- m_imds.side_effect = network_metadata_ret
-
dhcp_ctx = mock.MagicMock(lease=lease)
dhcp_ctx.obtain_lease.return_value = lease
m_dhcpv4.return_value = dhcp_ctx
+ m_req = mock.Mock(content=json.dumps(md))
+ m_request.side_effect = [
+ requests.Timeout("Fake connection timeout"),
+ requests.ConnectionError("Fake Network Unreachable"),
+ m_req,
+ ]
+
is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth0")
self.assertEqual(True, is_primary)
self.assertEqual(2, expected_nic_count)
+ assert len(m_request.mock_calls) == 3
- # All Eth0 errors are non-timeout errors. So we should have been
- # retrying indefinitely until success.
- for i in eth0Retries:
- self.assertTrue(i)
+ # Re-run tests to verify max retries.
+ m_request.reset_mock()
+ m_request.side_effect = [
+ requests.Timeout("Fake connection timeout")
+ ] * 6 + [requests.ConnectionError("Fake Network Unreachable")] * 6
+
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1")
self.assertEqual(False, is_primary)
- # All Eth1 errors are timeout errors. Retry happens for a max of 10 and
- # then we should have moved on assuming it is not the primary nic.
- for i in range(0, 10):
- self.assertTrue(eth1Retries[i])
- self.assertFalse(eth1Retries[10])
+ assert len(m_request.mock_calls) == 11
@mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
def test_wait_for_link_up_returns_if_already_up(
--
2.40.0
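
The heart of the metadata check fix is truthiness versus identity (standalone illustration):

    imds_md = {}  # what an IMDS fetch error yields instead of None

    print(imds_md is None)  # False: the old check mistook an error for success
    print(not imds_md)      # True:  the new check treats {} and None alike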


@@ -0,0 +1,65 @@
From 479c7201b61d674e54e2ee7c347cd90fc0aaf1d3 Mon Sep 17 00:00:00 2001
From: Adam Collard <sparkiegeek@users.noreply.github.com>
Date: Fri, 8 Apr 2022 20:20:18 +0100
Subject: [PATCH 8/8] util: atomically update sym links to avoid
FileNotFoundError when reading status (#1298)
Reference:https://github.com/canonical/cloud-init/commit/0450a1faff9e5095e6da0865916501772b3972e9
Conflict:changes applied to cloudinit/tests/test_util.py rather than tests/unittests/test_util.py.
Atomically update the desired link file from a temporary file
when a stale link already exists.
This avoids FileNotFound errors due to races with
cloud-init status --wait when the symlink
/run/cloud-init/status.json already exists.
LP: #1962150
---
cloudinit/tests/test_util.py | 5 ++++-
cloudinit/util.py | 7 ++++++-
2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index 671b8bc..5fb2508 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -328,13 +328,16 @@ class TestSymlink(CiTestCase):
tmpd = self.tmp_dir()
link = self.tmp_path("link", tmpd)
target = self.tmp_path("target", tmpd)
+ target2 = self.tmp_path("target2", tmpd)
util.write_file(target, "hello")
+ util.write_file(target2, "hello2")
util.sym_link(target, link)
self.assertTrue(os.path.exists(link))
- util.sym_link(target, link, force=True)
+ util.sym_link(target2, link, force=True)
self.assertTrue(os.path.exists(link))
+ self.assertEqual("hello2", util.load_file(link))
def test_sym_link_dangling_link(self):
tmpd = self.tmp_dir()
diff --git a/cloudinit/util.py b/cloudinit/util.py
index d5e8277..0e0fc04 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1797,7 +1797,12 @@ def is_link(path):
def sym_link(source, link, force=False):
LOG.debug("Creating symbolic link from %r => %r", link, source)
if force and os.path.lexists(link):
- del_file(link)
+ # Provide atomic update of symlink to avoid races with status --wait
+ # LP: #1962150
+ tmp_link = os.path.join(os.path.dirname(link), "tmp" + rand_str(8))
+ os.symlink(source, tmp_link)
+ os.replace(tmp_link, link)
+ return
os.symlink(source, link)
--
2.40.0
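
The atomic-swap idiom the patch adopts, as a self-contained sketch (random.choices stands in for cloud-init's rand_str() helper):

    import os
    import random
    import string

    def atomic_sym_link(source, link):
        """Replace 'link' with a symlink to 'source', with no missing-link window."""
        tmp_link = os.path.join(
            os.path.dirname(link),
            "tmp" + "".join(random.choices(string.ascii_lowercase, k=8)),
        )
        os.symlink(source, tmp_link)  # build the new link under a temp name
        os.replace(tmp_link, link)    # atomic rename: readers always see a link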


@@ -1,6 +1,6 @@
 Name: cloud-init
 Version: 21.4
-Release: 15
+Release: 17
 Summary: the defacto multi-distribution package that handles early initialization of a cloud instance.
 License: ASL 2.0 or GPLv3
 URL: http://launchpad.net/cloud-init
@@ -26,6 +26,16 @@ Patch14: backport-CVE-2022-2084.patch
 Patch15: remove-schema-errors-from-log-for-cloudinit-config-cc_.patch
 Patch16: backport-cloudinit-net-handle-two-different-routes-for-the-sa.patch
 Patch17: backport-Cleanup-ephemeral-IP-routes-on-exception-2100.patch
 Patch18: backport-CVE-2023-1786.patch
+Patch6001: backport-sources-azure-fix-metadata-check-in-_check_if_nic_is.patch
+Patch6002: backport-check-for-existing-symlink-while-force-creating-syml.patch
+Patch6003: backport-BUG-1473527-module-ssh-authkey-fingerprints-fails-In.patch
+Patch6004: backport-Resource-leak-cleanup-1556.patch
+Patch6005: backport-DataSourceVMware-fix-var-use-before-init-1674.patch
+Patch6006: backport-cc_set_hostname-ignore-var-lib-cloud-data-set-hostna.patch
+Patch6007: backport-disk_setup-use-byte-string-when-purging-the-partitio.patch
+Patch6008: backport-util-atomically-update-sym-links-to-avoid-Suppress-F.patch
 Patch9000: Fix-the-error-level-logs-displayed-for-the-cloud-init-local-service.patch
@@ -137,6 +147,20 @@
 %exclude /usr/share/doc/*
 %changelog
+* Sat Jul 29 2023 Lv Ying <lvying6@huawei.com> - 21.4-17
+- backport upstream patches:
+https://github.com/canonical/cloud-init/commit/fa53c7f4086f5937bc9bd328dba9f91ca73b6614
+https://github.com/canonical/cloud-init/commit/9c7502a801763520639c66125eb373123d1e4f44
+https://github.com/canonical/cloud-init/commit/2e17a0d626d41147b7d0822013e80179b3a81ee4
+https://github.com/canonical/cloud-init/commit/9f0efc474ea430c75cd0abec3e2da719d4934346
+https://github.com/canonical/cloud-init/commit/bb414c7866c4728b2105e84f7b426ab81cc4bf4d
+https://github.com/canonical/cloud-init/commit/9cbd94dd57112083856ead0e0ff724e9d1c1f714
+https://github.com/canonical/cloud-init/commit/6d817e94beb404d3917bf973bcb728aa6cc22ffe
+https://github.com/canonical/cloud-init/commit/0450a1faff9e5095e6da0865916501772b3972e9
 * Wed May 24 2023 shixuantong <shixuantong1@huawei.com> - 21.4-16
 - fix CVE-2023-1786
 * Fri May 19 2023 shixuantong <shixuantong1@huawei.com> - 21.4-15
 - Cleanup ephemeral IP routes on exception and handle two different routes for the same ip