diff --git a/0001-support-kabi-check.patch b/0001-support-kabi-check.patch new file mode 100644 index 0000000..06a974c --- /dev/null +++ b/0001-support-kabi-check.patch @@ -0,0 +1,628 @@ +From 0f0cbce6c93b97e312cafead937b46e6b2ceaf51 Mon Sep 17 00:00:00 2001 +From: wang-guangge +Date: Thu, 9 Nov 2023 10:46:33 +0800 +Subject: [PATCH] support kabi check + +--- + ceres/manages/vulnerability_manage.py | 2 +- + hotpatch/hotupgrade.py | 97 +++++- + hotpatch/updateinfo_parse.py | 3 + + hotpatch/upgrade_en.py | 413 ++++++++++++++++++++++++++ + 4 files changed, 506 insertions(+), 9 deletions(-) + create mode 100644 hotpatch/upgrade_en.py + +diff --git a/ceres/manages/vulnerability_manage.py b/ceres/manages/vulnerability_manage.py +index c41a7fa..bad2dee 100644 +--- a/ceres/manages/vulnerability_manage.py ++++ b/ceres/manages/vulnerability_manage.py +@@ -620,7 +620,7 @@ class VulnerabilityManage: + Tuple[str, str] + a tuple containing two elements (update result, log). + """ +- code, stdout, stderr = execute_shell_command(f"dnf update {rpm_name} -y") ++ code, stdout, stderr = execute_shell_command(f"dnf upgrade-en {rpm_name} -y") + if code != CommandExitCode.SUCCEED: + return TaskExecuteRes.FAIL, stderr + if "Complete" not in stdout: +diff --git a/hotpatch/hotupgrade.py b/hotpatch/hotupgrade.py +index f61e37f..c508e07 100644 +--- a/hotpatch/hotupgrade.py ++++ b/hotpatch/hotupgrade.py +@@ -12,16 +12,14 @@ + # ******************************************************************************/ + from __future__ import print_function + +-from time import sleep + import dnf.base + import dnf.exceptions + import hawkey ++from time import sleep + from dnf.cli import commands + from dnf.cli.option_parser import OptionParser +- +-# from dnf.cli.output import Output + from dnfpluginscore import _, logger +- ++from .upgrade_en import UpgradeEnhanceCommand + from .hot_updateinfo import HotUpdateinfoCommand + from .updateinfo_parse import HotpatchUpdateInfo + from .syscare import Syscare +@@ 
-37,6 +35,9 @@ class HotupgradeCommand(dnf.cli.Command): + usage = "" + syscare = Syscare() + hp_list = [] ++ is_need_accept_kernel_hp = False ++ is_kernel_coldpatch_installed = False ++ kernel_coldpatch = '' + + @staticmethod + def set_argparser(parser): +@@ -50,6 +51,13 @@ class HotupgradeCommand(dnf.cli.Command): + parser.add_argument( + "--takeover", default=False, action='store_true', help=_('kernel cold patch takeover operation') + ) ++ parser.add_argument( ++ "-f", ++ dest='force', ++ default=False, ++ action='store_true', ++ help=_('force retain kernel rpm package if kernel kabi check fails'), ++ ) + + def configure(self): + """Verify that conditions are met so that this command can run. +@@ -104,17 +112,72 @@ class HotupgradeCommand(dnf.cli.Command): + + def run_transaction(self) -> None: + """ +- apply hot patches ++ apply hot patches, and process kabi check for kernel package rpm. + Returns: + None + """ + # syscare need a little bit time to process the installed hot patch + sleep(0.5) ++ ++ is_all_kernel_hp_actived = True ++ # hotpatch that fail to be activated will be automatically uninstalled ++ target_remove_hp = [] ++ acceptable_hp = [] + for hp in self.hp_list: +- self._apply_hp(hp) +- if self.opts.takeover and self.is_need_accept_kernel_hp: ++ status = self._apply_hp(hp) ++ if status: ++ target_remove_hp.append(hp) ++ if not hp.startswith('patch-kernel-'): ++ continue ++ if status: ++ is_all_kernel_hp_actived &= False ++ else: ++ is_all_kernel_hp_actived &= True ++ acceptable_hp.append(hp) ++ ++ for ts_item in self.base.transaction: ++ if ts_item.action not in dnf.transaction.FORWARD_ACTIONS: ++ continue ++ if str(ts_item.pkg) == self.kernel_coldpatch: ++ self.is_kernel_coldpatch_installed = True ++ ++ self.keep_hp_operation_atomic(is_all_kernel_hp_actived, target_remove_hp) ++ ++ if self.is_need_accept_kernel_hp and acceptable_hp: ++ logger.info(_('No available kernel cold patch for takeover, gonna accept available kernel hot patch.')) ++ for hp 
in acceptable_hp: + self._accept_kernel_hp(hp) + ++ def keep_hp_operation_atomic(self, is_all_kernel_hp_actived, target_remove_hp): ++ """ ++ Keep hotpatch related operation atomic. Once one kernel hotpatch is not successfully activated or ++ kabi check fails, uninstall the kernel coldpatch. And unsuccessfully activated hotpatch package ++ will be removed. ++ ++ Args: ++ is_all_kernel_hp_actived(bool): are all kernel related hotpatches activated ++ target_remove_hp(list): target remove hotpatch list ++ """ ++ upgrade_en = UpgradeEnhanceCommand(self.cli) ++ ++ if self.is_kernel_coldpatch_installed: ++ if not is_all_kernel_hp_actived: ++ logger.info(_('Gonna remove %s due to some kernel hotpatch activation failed.'), self.kernel_coldpatch) ++ upgrade_en.remove_rpm(str(self.kernel_coldpatch)) ++ self.is_need_accept_kernel_hp = False ++ # process kabi check ++ elif not upgrade_en.kabi_check(str(self.kernel_coldpatch)) and not self.opts.force: ++ logger.info(_('Gonna remove %s due to Kabi check failed.'), self.kernel_coldpatch) ++ # rebuild rpm database for processing kernel rpm remove operation ++ upgrade_en.rebuild_rpm_db() ++ upgrade_en.remove_rpm(str(self.kernel_coldpatch)) ++ self.is_need_accept_kernel_hp = True ++ ++ if target_remove_hp: ++ logger.info(_('Gonna remove unsuccessfully activated hotpatch rpm.')) ++ for hotpatch in target_remove_hp: ++ upgrade_en.remove_rpm(hotpatch) ++ + def _apply_hp(self, hp_full_name): + pkg_info = self._parse_hp_name(hp_full_name) + hp_subname = self._get_hp_subname_for_syscare(pkg_info) +@@ -123,6 +186,7 @@ class HotupgradeCommand(dnf.cli.Command): + logger.info(_('Apply hot patch failed: %s.'), hp_subname) + else: + logger.info(_('Apply hot patch succeed: %s.'), hp_subname) ++ return status + + @staticmethod + def _get_hp_subname_for_syscare(pkg_info: dict) -> str: +@@ -394,9 +458,11 @@ class HotupgradeCommand(dnf.cli.Command): + """ + process takeover operation. 
+ """ ++ if not self.get_kernel_hp_list(): ++ return + kernel_coldpatch = self.get_target_installed_kernel_coldpatch_of_hotpatch() +- self.is_need_accept_kernel_hp = False + if kernel_coldpatch: ++ self.kernel_coldpatch = kernel_coldpatch + logger.info(_("Gonna takeover kernel cold patch: ['%s']" % kernel_coldpatch)) + success = self._install_rpm_pkg([kernel_coldpatch]) + if success: +@@ -412,6 +478,21 @@ class HotupgradeCommand(dnf.cli.Command): + ) + return + ++ def get_kernel_hp_list(self) -> list: ++ """ ++ Get kernel hp list from self.hp_list. ++ ++ Returns: ++ list: kernel hp list ++ e.g. ++ ['patch-kernel-5.10.0-153.12.0.92.oe2203sp2-ACC-1-1.x86_64'] ++ """ ++ kernel_hp_list = [] ++ for hp in self.hp_list: ++ if hp.startswith('patch-kernel-'): ++ kernel_hp_list.append(hp) ++ return kernel_hp_list ++ + def get_target_installed_kernel_coldpatch_of_hotpatch(self) -> str: + """ + get the highest kernel cold patch of hot patch in "dnf hot-updateinfo list cves", if the corresponding +diff --git a/hotpatch/updateinfo_parse.py b/hotpatch/updateinfo_parse.py +index 4760378..fc39d48 100644 +--- a/hotpatch/updateinfo_parse.py ++++ b/hotpatch/updateinfo_parse.py +@@ -322,6 +322,9 @@ class HotpatchUpdateInfo(object): + cmd = ["uname", "-r"] + kernel_version = '' + kernel_version, return_code = cmd_output(cmd) ++ # 'uname -r' show the kernel version-release.arch of the current system ++ # [root@openEuler hotpatch]# uname -r ++ # 5.10.0-136.12.0.86.oe2203sp1.x86_64 + if return_code != SUCCEED: + return kernel_version + kernel_version = kernel_version.split('\n')[0] +diff --git a/hotpatch/upgrade_en.py b/hotpatch/upgrade_en.py +new file mode 100644 +index 0000000..266bcae +--- /dev/null ++++ b/hotpatch/upgrade_en.py +@@ -0,0 +1,413 @@ ++#!/usr/bin/python3 ++# ****************************************************************************** ++# Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. ++# licensed under the Mulan PSL v2. 
++# You can use this software according to the terms and conditions of the Mulan PSL v2. ++# You may obtain a copy of Mulan PSL v2 at: ++# http://license.coscl.org.cn/MulanPSL2 ++# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR ++# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR ++# PURPOSE. ++# See the Mulan PSL v2 for more details. ++# ******************************************************************************/ ++import dnf ++import gzip ++import subprocess ++from dnfpluginscore import _ ++from dnf.cli import commands ++from dnf.cli.commands.upgrade import UpgradeCommand ++from dnf.cli.option_parser import OptionParser ++ ++SUCCEED = 0 ++FAIL = 255 ++ ++ ++def cmd_output(cmd): ++ try: ++ result = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE) ++ result.wait() ++ return result.stdout.read().decode('utf-8'), result.returncode ++ except Exception as e: ++ print("error: ", e) ++ return str(e), FAIL ++ ++ ++@dnf.plugin.register_command ++class UpgradeEnhanceCommand(dnf.cli.Command): ++ SYMVERS_FILE = "/boot/symvers-%s.gz" ++ ++ aliases = ['upgrade-en'] ++ summary = _( ++ 'upgrade with KABI(Kernel Application Binary Interface) check. If the loaded kernel modules \ ++ have KABI compatibility with the new version kernel rpm, the kernel modules can be installed \ ++ and used in the new version kernel without recompling.' ++ ) ++ ++ @staticmethod ++ def set_argparser(parser): ++ parser.add_argument( ++ 'packages', ++ nargs='*', ++ help=_('Package to upgrade'), ++ action=OptionParser.ParseSpecGroupFileCallback, ++ metavar=_('PACKAGE'), ++ ) ++ parser.add_argument( ++ "-f", ++ dest='force', ++ default=False, ++ action='store_true', ++ help=_('force retain kernel rpm package if kernel kabi check fails'), ++ ) ++ ++ def configure(self): ++ """Verify that conditions are met so that this command can run. 
++ ++ These include that there are enabled repositories with gpg ++ keys, and that this command is being run by the root user. ++ """ ++ demands = self.cli.demands ++ demands.sack_activation = True ++ demands.available_repos = True ++ demands.resolving = True ++ demands.root_user = True ++ commands._checkGPGKey(self.base, self.cli) ++ if not self.opts.filenames: ++ commands._checkEnabledRepo(self.base) ++ self.upgrade_minimal = None ++ self.all_security = None ++ self.skipped_grp_specs = None ++ ++ def run(self): ++ self.upgrade() ++ ++ def run_transaction(self): ++ """ ++ Process kabi check for kernel rpm package installed this time. If the kernel rpm pakcgae fails kabi check, ++ uninstall it. ++ """ ++ for ts_item in self.base.transaction: ++ if ts_item.action not in dnf.transaction.FORWARD_ACTIONS: ++ continue ++ if ts_item.pkg.name == 'kernel': ++ kernel_pkg = str(ts_item.pkg) ++ success = self.kabi_check(kernel_pkg) ++ if not success and not self.opts.force: ++ print('Gonna remove %s due to kabi check failed.' % kernel_pkg) ++ # rebuild rpm database for processing kernel rpm remove operation ++ self.rebuild_rpm_db() ++ self.remove_rpm(kernel_pkg) ++ ++ def remove_rpm(self, pkg: str): ++ """ ++ Remove rpm package via command line. ++ ++ Args: ++ pkg(str): package name ++ e.g. ++ kernel-5.10.0-153.18.0.94.oe2203sp2.x86_64 ++ """ ++ remove_cmd = ["dnf", "remove", pkg, "-y"] ++ output, return_code = cmd_output(remove_cmd) ++ if return_code != SUCCEED: ++ print('Remove package failed: %s.' % pkg) ++ exit(1) ++ else: ++ print('Remove package succeed: %s.' % pkg) ++ # do not achieve the expected result of installing related kernel rpm ++ exit(1) ++ ++ def rebuild_rpm_db(self): ++ """ ++ Rebuild rpm database for processing kernel rpm remove operation. 
++ """ ++ rebuilddb_cmd = ["rpm", "--rebuilddb"] ++ output, return_code = cmd_output(rebuilddb_cmd) ++ if return_code != SUCCEED: ++ print('Rebuild rpm database failed.') ++ else: ++ print('Rebuild rpm database succeed.') ++ ++ def kabi_check(self, pkg: str) -> bool: ++ """ ++ Process kabi check after upgrading kernel rpm. ++ ++ Args: ++ pkg(str): package name ++ e.g. ++ kernel-5.10.0-153.18.0.94.oe2203sp2.x86_64 ++ ++ Returns: ++ bool: kabi check result ++ """ ++ print("Kabi check for %s:" % pkg) ++ # version-release.arch ++ evra = pkg.split("-", 1)[1] ++ symvers_file = self.SYMVERS_FILE % (evra) ++ ++ target_symvers_symbol_crc_mapping, return_code = self.get_target_symvers_symbol_crc_mapping(symvers_file) ++ if return_code != SUCCEED: ++ print('[Fail] Cannot find the symvers file of %s.', pkg) ++ return False ++ module_actual_symbol_crc_mapping = self.get_module_actual_symbol_crc_mapping() ++ ++ module_different_symbol_crc_mapping = self.compare_actual_and_target_symvers_symbol_crc_mapping( ++ module_actual_symbol_crc_mapping, target_symvers_symbol_crc_mapping ++ ) ++ ++ sum_module_num = len(module_actual_symbol_crc_mapping) ++ fail_module_num = len(module_different_symbol_crc_mapping) ++ pass_module_num = sum_module_num - fail_module_num ++ ++ reminder_statement = "Here are %s loaded kernel modules in this system, %s pass, %s fail." % ( ++ sum_module_num, ++ pass_module_num, ++ fail_module_num, ++ ) ++ ++ if fail_module_num > 0: ++ print('[Fail] %s' % reminder_statement) ++ self.output_symbol_crc_difference_report(module_different_symbol_crc_mapping) ++ return False ++ ++ print('[Success] %s' % reminder_statement) ++ return True ++ ++ def output_symbol_crc_difference_report(self, module_different_symbol_crc_mapping: dict): ++ """ ++ Format the output for symbol crc difference report. ++ The output is as follows: ++ ++ Failed modules are as follows: ++ No. 
Module Difference ++ 1 upatch ipv6_chk_custom_prefix : 0x0c994af2 != 0x0c994af3 ++ pcmcia_reset_card : 0xe9bed965 != null ++ 2 crct10dif_pclmul crypto_unregister_shash: 0x60f5b0b7 != 0x0c994af3 ++ __fentry__ : 0xbdfb6dbb != null ++ """ ++ print('Failed modules are as follows:') ++ ++ title = ['No.', 'Module', 'Difference'] ++ # column width ++ sequence_width = len(title[0]) ++ module_width = len(title[1]) ++ symbol_width = crc_info_width = 0 ++ ++ for seq, module_name in enumerate(module_different_symbol_crc_mapping): ++ # the sequence starts from 1 ++ seq = seq + 1 ++ sequence_width = max(sequence_width, len(str(seq))) ++ different_symbol_crc_mapping = module_different_symbol_crc_mapping[module_name] ++ module_width = max(module_width, len(module_name)) ++ for symbol, crc_list in different_symbol_crc_mapping.items(): ++ symbol_width = max(symbol_width, len(symbol)) ++ crc_info = "%s != %s" % (crc_list[0], crc_list[1]) ++ crc_info_width = max(crc_info_width, len(crc_info)) ++ ++ # print title ++ print('%-*s %-*s %s' % (sequence_width, title[0], module_width, title[1], title[2])) ++ ++ for seq, module_name in enumerate(module_different_symbol_crc_mapping): ++ seq = seq + 1 ++ print('%-*s %-*s' % (sequence_width, seq, module_width, module_name), end='') ++ different_symbol_crc_mapping = module_different_symbol_crc_mapping[module_name] ++ is_first_symbol = True ++ for symbol, crc_list in different_symbol_crc_mapping.items(): ++ crc_info = "%s != %s" % (crc_list[0], crc_list[1]) ++ if is_first_symbol: ++ print(' %-*s: %s' % (symbol_width, symbol, crc_info), end='') ++ is_first_symbol = False ++ else: ++ print( ++ ' %-*s %-*s: %s' % (sequence_width + module_width, "", symbol_width, symbol, crc_info), end='' ++ ) ++ print('') ++ ++ def compare_actual_and_target_symvers_symbol_crc_mapping( ++ self, module_actual_symbol_crc_mapping: dict, target_symvers_symbol_crc_mapping: dict ++ ) -> dict: ++ """ ++ Compare the actual symbol crc mapping with the target symvers symbol crc 
mapping. ++ ++ Args: ++ module_actual_symbol_crc_mapping(dict): module actual symbol crc mapping ++ e.g. ++ { ++ 'upatch': { ++ 'ipv6_chk_custom_prefix': '0x0c994af3', ++ 'pcmcia_reset_card': '0xe9bed965', ++ } ++ } ++ ++ target_symvers_symbol_crc_mapping(dict): target symvers symbol crc mapping ++ e.g. ++ { ++ 'ipv6_chk_custom_prefix': '0x0c994af2', ++ 'pcmcia_reset_card': '0xe9bed965', ++ } ++ ++ Returns: ++ dict: module different symbol crc mapping ++ e.g. ++ { ++ 'upatch': { ++ 'ipv6_chk_custom_prefix': ['0x0c994af3', '0x0c994af2']. ++ } ++ } ++ """ ++ module_different_symbol_crc_mapping = dict() ++ for module_name, actual_symbol_crc_mapping in module_actual_symbol_crc_mapping.items(): ++ different_symbol_crc_mapping = dict() ++ for actual_symbol, actual_crc in actual_symbol_crc_mapping.items(): ++ if actual_symbol not in target_symvers_symbol_crc_mapping: ++ continue ++ elif target_symvers_symbol_crc_mapping[actual_symbol] != actual_symbol_crc_mapping[actual_symbol]: ++ different_symbol_crc_mapping[actual_symbol] = [ ++ actual_crc, ++ target_symvers_symbol_crc_mapping[actual_symbol], ++ ] ++ if not different_symbol_crc_mapping: ++ continue ++ module_different_symbol_crc_mapping[module_name] = different_symbol_crc_mapping ++ return module_different_symbol_crc_mapping ++ ++ def get_module_actual_symbol_crc_mapping(self) -> dict: ++ """ ++ Get the module actual symbol crc mapping of the driver modules currently being loaded in the system. ++ ++ Returns: ++ dict: module actual symbol crc mapping ++ e.g. ++ { ++ 'upatch': { ++ 'ipv6_chk_custom_prefix': '0x0c994af3', ++ 'pcmcia_reset_card': '0xe9bed965', ++ } ++ } ++ """ ++ module_actual_symbol_crc_mapping = dict() ++ lsmod_cmd = ["lsmod"] ++ # 'lsmod' shows all modules loaded in the system ++ # e.g. 
++ # [root@openEuler ~]# lsmod ++ # Module Size Used by ++ # upatch 53248 0 ++ # nft_fib_inet 16384 1 ++ # nft_fib_ipv4 16384 1 nft_fib_inet ++ list_output, return_code = cmd_output(lsmod_cmd) ++ if return_code != SUCCEED: ++ return module_actual_symbol_crc_mapping ++ ++ content = list_output.split('\n') ++ for line in content[1:]: ++ if not line: ++ continue ++ module_name = line.split()[0] ++ modinfo_cmd = ['modinfo', module_name, '-n'] ++ # 'modinfo module_name -n' shows module path information ++ # e.g. ++ # [root@openEuler ~]# modinfo upatch -n ++ # /lib/modules/5.10.0-153.12.0.92.oe2203sp2.x86_64/weak-updates/syscare/upatch.ko ++ module_path_output, return_code = cmd_output(modinfo_cmd) ++ if return_code != SUCCEED: ++ continue ++ ++ module_path = module_path_output.split('\n')[0] ++ actual_symbol_crc_mapping, return_code = self.get_actual_symbol_crc_mapping(module_path) ++ if return_code != SUCCEED: ++ continue ++ ++ module_actual_symbol_crc_mapping[module_name] = actual_symbol_crc_mapping ++ return module_actual_symbol_crc_mapping ++ ++ def get_actual_symbol_crc_mapping(self, module_path: str) -> (dict, int): ++ """ ++ Get actual symbol crc mapping for specific module. ++ ++ Args: ++ module_path(str): loaded module path ++ ++ Returns: ++ dict, bool: actual symbol crc mapping, return code ++ """ ++ actual_symbol_crc_mapping = dict() ++ modprobe_cmd = ['modprobe', '--dump', module_path] ++ # 'modprobe --dump module_path' shows module related kabi information ++ # e.g. 
++ # [root@openEuler ~]# modprobe --dump \ ++ # /lib/modules/5.10.0-153.12.0.92.oe2203sp2.x86_64/weak-updates/syscare/upatch.ko ++ # 0xe32130cf module_layout ++ # 0x9c4befaf kmalloc_caches ++ # 0xeb233a45 __kmalloc ++ # 0xd6ee688f vmalloc ++ # 0x349cba85 strchr ++ # 0x754d539c strlen ++ crc_symbol_output_lines, return_code = cmd_output(modprobe_cmd) ++ if return_code != SUCCEED: ++ return actual_symbol_crc_mapping, return_code ++ ++ crc_symbol_output = crc_symbol_output_lines.split('\n') ++ for crc_symbol_line in crc_symbol_output: ++ if not crc_symbol_line: ++ continue ++ crc_symbol_line = crc_symbol_line.split() ++ crc, symbol = crc_symbol_line[0], crc_symbol_line[1] ++ actual_symbol_crc_mapping[symbol] = crc ++ return actual_symbol_crc_mapping, return_code ++ ++ def get_target_symvers_symbol_crc_mapping(self, symvers_file: str) -> (dict, int): ++ """ ++ Get target symbol crc mapping from symvers file of kernel rpm package. The symvers file content is ++ as follows(e.g.): ++ ++ 0x0c994af3 ipv6_chk_custom_prefix vmlinux EXPORT_SYMBOL ++ 0xe9bed965 pcmcia_reset_card vmlinux EXPORT_SYMBOL ++ 0x55417264 unregister_vt_notifier vmlinux EXPORT_SYMBOL_GPL ++ 0x8c8905c0 set_anon_super vmlinux EXPORT_SYMBOL ++ 0x3ba051a9 __cleancache_invalidate_page vmlinux EXPORT_SYMBOL ++ ++ the first column is crc(Cyclic Redundancy Check), and the second column is symbol. ++ ++ Args: ++ symvers_file(str): symvers file path ++ ++ Returns: ++ dict, int: target symvers symbol crc mapping, return_code ++ e.g. 
++ { ++ 'ipv6_chk_custom_prefix': '0x0c994af3', ++ 'pcmcia_reset_card': '0xe9bed965', ++ }, ++ SUCCEED ++ """ ++ symvers_symbol_crc_mapping = dict() ++ try: ++ content = gzip.open(symvers_file, 'rb') ++ except FileNotFoundError as e: ++ print("error: ", e) ++ return symvers_symbol_crc_mapping, FAIL ++ ++ for line in content.readlines(): ++ line = line.decode() ++ line = line.split() ++ crc, symbol = line[0], line[1] ++ symvers_symbol_crc_mapping[symbol] = crc ++ content.close() ++ return symvers_symbol_crc_mapping, SUCCEED ++ ++ def upgrade(self): ++ """ ++ Use UpgradeCommand to process the upgrade operation. ++ """ ++ upgrade = UpgradeCommand(self.cli) ++ upgrade.upgrade_minimal = self.upgrade_minimal ++ upgrade.opts = self.opts ++ upgrade.opts.filenames = self.opts.filenames ++ upgrade.opts.pkg_specs = self.opts.pkg_specs ++ upgrade.opts.grp_specs = self.opts.grp_specs ++ ++ upgrade.upgrade_minimal = None ++ upgrade.all_security = None ++ upgrade.skipped_grp_specs = None ++ ++ upgrade.run() +-- +2.27.0 + diff --git a/0002-modify-re-of-kernel-filter.patch b/0002-modify-re-of-kernel-filter.patch new file mode 100644 index 0000000..392de4f --- /dev/null +++ b/0002-modify-re-of-kernel-filter.patch @@ -0,0 +1,81 @@ +From 831aca01a20fcd67b1d6ff604a0334aaa419efd5 Mon Sep 17 00:00:00 2001 +From: gongzt +Date: Sat, 11 Nov 2023 17:47:25 +0800 +Subject: Modify the regular expression of kernel filter +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +--- + ceres/manages/vulnerability_manage.py | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +diff --git a/ceres/manages/vulnerability_manage.py b/ceres/manages/vulnerability_manage.py +index c41a7fa..39f475d 100644 +--- a/ceres/manages/vulnerability_manage.py ++++ b/ceres/manages/vulnerability_manage.py +@@ -166,7 +166,7 @@ class VulnerabilityManage: + return rpm_info + + for line in stdout.splitlines(): +- rpm_name, new_rpm_info = line.split(":",1) ++ 
rpm_name, new_rpm_info = line.split(":", 1) + old_rpm_info = rpm_info.get(rpm_name, "") + rpm_info[rpm_name] = new_rpm_info if new_rpm_info > old_rpm_info else old_rpm_info + LOGGER.debug("query installed rpm package info succeed!") +@@ -200,7 +200,7 @@ class VulnerabilityManage: + # ("kernel", "x86_64.", "5.10.0-60.105.0.132.oe2203"), + # ("kernel-debuginfo", "x86_64", "5.10.0-60.105.0.132.oe2203") + # ] +- rpm_info = re.findall("^(kernel\S*)\.([a-z468_]+)\s+(\S+)", stdout, re.MULTILINE) ++ rpm_info = re.findall("^(kernel)\.([a-z468_]+)\s+(\S+)", stdout, re.MULTILINE) + + if not rpm_info: + return result +@@ -243,7 +243,7 @@ class VulnerabilityManage: + # ("CVE-2021-43976", "Important/Sec.", "kernel-4.19.90-2201.1.0.0132.oe1.x86_64"), + # ("CVE-2021-0941", "Important/Sec.", "kernel-4.19.90-2201.1.0.0132.oe1.x86_64") + # ] +- all_cve_info = re.findall(r"(CVE-\d{4}-\d+)\s+([\w+/.]+)\s+(kernel\S+)", stdout) ++ all_cve_info = re.findall(r"(CVE-\d{4}-\d+)\s+([\w+/.]+)\s+(kernel-\d\S+)", stdout) + if not all_cve_info: + return unfixed_cves + +@@ -306,7 +306,7 @@ class VulnerabilityManage: + # ("CVE-2023-1513", "Important/Sec.", "kernel-4.19.90-2304.1.0.0196.oe1.x86_64", "patch-kernel-4.19.90-2112.."), + # ("CVE-2021-xxxx", "Important/Sec.", "-", "patch-redis-6.2.5-1-SGL_CVE_2023_1111_CVE_2023_1112-1-1.x86_64") + # ] +- all_cve_info = re.findall(r"(CVE-\d{4}-\d+)\s+([\w+/.]+)\s+(kernel\S+|-)\s+(patch-kernel\S+|-)", stdout) ++ all_cve_info = re.findall(r"(CVE-\d{4}-\d+)\s+([\w+/.]+)\s+(kernel-\d\S+|-)\s+(patch-kernel-\d\S+|-)", stdout) + if not all_cve_info: + return cve_info_list + +@@ -368,7 +368,7 @@ class VulnerabilityManage: + # ("CVE-2021-43976","Important/Sec.", "kernel-4.19.90-2201.1.0.0132.oe1.x86_64"), + # ("CVE-2021-0941","Important/Sec.", "kernel-4.19.90-2201.1.0.0132.oe1.x86_64") + # ] +- fixed_cves_info = re.findall(r"(CVE-\d{4}-\d+)\s+([\w+/.]+)\s+(kernel\S+)", stdout) ++ fixed_cves_info = re.findall(r"(CVE-\d{4}-\d+)\s+([\w+/.]+)\s+(kernel-\d\S+)", stdout) 
+ + if not fixed_cves_info: + return fixed_cves +@@ -407,7 +407,7 @@ class VulnerabilityManage: + # ("CVE-2021-xxxx", "Important/Sec.", "-", "patch-redis-6.2.5-1-SGL_CVE_2023_1111_CVE_2023_1112-1-1.x86_64") + # ] + hotpatch_status = self._query_applied_hotpatch_status() +- all_cve_info = re.findall(r"(CVE-\d{4}-\d+)\s+([\w+/.]+)\s+(kernel\S+|-)\s+(patch-kernel\S+|-)", stdout) ++ all_cve_info = re.findall(r"(CVE-\d{4}-\d+)\s+([\w+/.]+)\s+(kernel-\d\S+|-)\s+(patch-kernel-\d\S+|-)", stdout) + + cve_info_fixed_by_coldpatch, cve_info_fixed_by_hotpatch, hotpatch_dic = [], [], defaultdict(str) + for cve_id, _, coldpatch, hotpatch in all_cve_info: +@@ -472,7 +472,7 @@ class VulnerabilityManage: + # ("CVE-2023-1112", "redis-6.2.5-1/SGL_CVE_2023_1111_CVE_2023_1112-1-1/redis-server", "NOT-APPLIED"), + # ("CVE-2023-1111", "redis-6.2.5-1/ACC-1-1/redis-benchmark", "ACTIVED") + # ] +- applied_hotpatch_info_list = re.findall(r"(CVE-\d{4}-\d+)\s+(kernel[\w\-/.]+)\s+([A-W]+)", stdout) ++ applied_hotpatch_info_list = re.findall(r"(CVE-\d{4}-\d+)\s+(kernel-\d[\w\-/.]+)\s+([A-W]+)", stdout) + + if not applied_hotpatch_info_list: + return result +-- +Gitee diff --git a/0003-fix-bug-in-test_hotpatch.py.patch b/0003-fix-bug-in-test_hotpatch.py.patch new file mode 100644 index 0000000..a402546 --- /dev/null +++ b/0003-fix-bug-in-test_hotpatch.py.patch @@ -0,0 +1,27 @@ +From ccbd7a6dea68303fb7ec6f777f0e6b8d9e6c7773 Mon Sep 17 00:00:00 2001 +From: wang-guangge +Date: Wed, 15 Nov 2023 10:35:42 +0800 +Subject: [PATCH] fix bug in test_hotpatch.py + +--- + hotpatch/test_hotpatch.py | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/hotpatch/test_hotpatch.py b/hotpatch/test_hotpatch.py +index f46faed..e903d55 100644 +--- a/hotpatch/test_hotpatch.py ++++ b/hotpatch/test_hotpatch.py +@@ -13,8 +13,8 @@ + import unittest + from unittest import mock + +-from .hotpatch import HotpatchCommand +-from .syscare import SUCCEED, FAIL ++from .hotpatch_ops import HotpatchCommand ++from 
.syscare import FAIL, SUCCEED + + + class HotpatchTestCase(unittest.TestCase): +-- +2.27.0 + diff --git a/0004-Modify-method-of-mark-invalid-data-and-add-summary.patch b/0004-Modify-method-of-mark-invalid-data-and-add-summary.patch new file mode 100644 index 0000000..cdada06 --- /dev/null +++ b/0004-Modify-method-of-mark-invalid-data-and-add-summary.patch @@ -0,0 +1,1594 @@ +From 509f622afa19c0c62834952908db065a237c7e48 Mon Sep 17 00:00:00 2001 +From: LHesperus <2639350497@qq.com> +Date: Sun, 19 Nov 2023 20:31:31 +0800 +Subject: [PATCH] Modify method of mark invalid data and add summary + +--- + extra-tools/da-tool/README.md | 166 +++++++++--- + extra-tools/da-tool/analysis/config.cpp | 11 +- + extra-tools/da-tool/analysis/config.h | 4 +- + .../da-tool/analysis/function_stack.cpp | 157 ++++++++---- + extra-tools/da-tool/analysis/function_stack.h | 15 +- + .../da-tool/analysis/sched_analysis.cpp | 100 ++++---- + extra-tools/da-tool/analysis/sched_analysis.h | 7 +- + extra-tools/da-tool/analysis/time_pair.cpp | 240 ++++++++++-------- + extra-tools/da-tool/analysis/time_pair.h | 32 ++- + .../da-tool/analysis/trace_resolve.cpp | 5 +- + extra-tools/da-tool/analysis/trace_resolve.h | 7 - + extra-tools/da-tool/conf/da-tool.conf | 6 +- + extra-tools/da-tool/main.cpp | 7 +- + extra-tools/da-tool/script/da-tool.sh | 2 +- + extra-tools/da-tool/test/case/case1/case1.cpp | 64 +++++ + .../da-tool/test/case/case2/case2_udp_cli.c | 37 +++ + .../test/case/case2/case2_udp_ser_noblk.c | 53 ++++ + 17 files changed, 631 insertions(+), 282 deletions(-) + create mode 100644 extra-tools/da-tool/test/case/case1/case1.cpp + create mode 100644 extra-tools/da-tool/test/case/case2/case2_udp_cli.c + create mode 100644 extra-tools/da-tool/test/case/case2/case2_udp_ser_noblk.c + +diff --git a/extra-tools/da-tool/README.md b/extra-tools/da-tool/README.md +index b9a603b..209b43b 100644 +--- a/extra-tools/da-tool/README.md ++++ b/extra-tools/da-tool/README.md +@@ -13,9 +13,9 @@ + + 线程的调度特征 + + ## 
主要功能 +-+ 分析出内核态函数/用户态函数(C/C++)/sched_switch的时延 +-+ 统计各调用栈时延平均值 ++ + + 推导调用关系(乱序) +++ 统计各函数调用栈时延信息 + + 线程调度特征 + + 记录系统信息 + +@@ -23,20 +23,21 @@ + + 不支持递归函数分析 + + ## 文件夹说明 +- ++**工程文件夹** + + script : shell脚本 +- + da-tool.sh 利用`uprobe/kprobe` 采集程序trace,同时可生成analysis需要的配置文件 ++ + da-tool.sh 利用`uprobe/kprobe` 采集程序 `trace`,同时可生成analysis需要的配置文件 + + config 配置文件夹 + + da-tool.conf 放置`/etc` 目录下 + + analysis `C++`程序文件 + + config(.cpp/ch ): 解析外部参数 和 `function_config` + + common.h : 通用参数 +- + trace_resolve(.cpp/.h) : 解析trace ++ + trace_resolve(.cpp/.h) : 解析`trace` + + time_pair(.cpp/.h)::获取各函数的起止时间等信息 + + function_strack(.cpp/.h): 获取各pid 函数调用栈及其时延信息 + + sched_analysis(.cpp/.h): 计算线程调度信息 + + main.cpp + + CMakeLists.txt +++ test : 测试程序 + + **本地文件夹** + +@@ -50,51 +51,131 @@ + + output : 时延结果信息 + + ## 使用方法 ++### 命令格式 ++时延分析工具通过`da-tool.sh`命令采集和分析函数时延,使用格式为 + +-#### 1 配置需要采集的函数 +-配置文件:da-tool.conf +-+ k 开头为 kernel 符号,u 开头为内核符号, s 开头为调度配置(目前仅支持`sched_switch`) +-+ k 和 s 只能一行配置完 +-+ u 可以多行配置, 格式:`[u , 程序路径,二进制名称 ,追踪的函数]` +-+ 函数务必存在,否则uprobe 配置不成功,配置的内核符号应在`/proc/kallsyms`能够查询到,配置的用户态程序符号仅支持`C/C++`,配置的符号应用`objdump`能够查询到 +-+ 每行末尾不要有逗号 ++**da-tool.sh** [-t <*probe time*>] + +-配置文件举例如下: ++|参数|是否必选|参数函数| ++|----|--------|-------| ++|-t |否| 采集函数 `trace` 的时长,单位秒,最大限制 100,默认10| ++ ++### 自定义配置函数 ++配置文件:`/etc/da-tool.conf` ++ ++举例如下: + ``` +-# kernel symbol config +-k,ksys_write,udp_recvmsg,udp_sendmsg ++# /etc/da-tool.conf ++ ++# kernel symbol config (ref: /proc/kallsyms) ++k,udp_recvmsg,udp_sendmsg,dev_queue_xmit,udp_send_skb,sock_recvmsg,__skb_recv_udp,udp_rcv + + # sched config + s,sched_switch + +-# user symbol config (path ,bin,func1,func2) ++# user symbol config (format : u,path,bin_name,func1,func2,...,funcN) ++# u,/path/,bin_name,sendto ++u,/home/git_repo/nda-tool/nda-tool/test/net_demo2/,server_no_blk,recvfrom ++u,/home/git_repo/nda-tool/nda-tool/test/net_demo2/,client_to_noblk,sendto + 
u,/home/git_repo/nda-tool/nda-tool/test/base_demo/,base_demo_cpp,_Z5func1v,_Z5func2v,_Z5func2i,_Z5func3v,_Z5func4v +-u,/home/git_repo/nda-tool/nda-tool/test/net_demo1/,client,sendto,recvfrom ++# end + ``` +-备注,为了支持用户态不同二进制重名函数采集,配置`event`时,命名为`u0_func1``、u1_func1`...,以上面配置为例,`loop_func`为`u2_loop_func`, +-观测`trace` 结果时不要产生歧义。 +-#### 2 采集trace并分析 + +-```shell +-da-tool.sh -t 10 # 采集10秒并分析结果 +++ k 开头为 kernel 符号,u 开头为用户态程序符号, s 开头为调度配置(目前仅支持`sched_switch`且必须配置) +++ k 和 s 只能一行配置完 +++ u 可以多行配置, 格式:`[u,程序路径,二进制名称,追踪的函数]` +++ 请确保函数存在,否则 `uprobe` 配置不成功,所配置的内核符号应在`/proc/kallsyms`能够查询到,所配置的用户态程序符号仅支持`C/C++`,且通过`objdump`应能够查询到 +++ 每行末尾不要有逗号 ++ ++ ++注意,为了支持用户态不同二进制重名函数采集,配置`event`时,命名为`u0_func1`、`u1_func1`...,以上面配置为例,`_Z5func1v`为`u2__Z5func1v`。 ++### 分析结果说明 ++ +++ 终端输出结果:各函数调用栈的时延信息 +++ 文件夹输出结果 : `/var/da-tool/analysis-output/ouput/` ++ + func_delay_stack : 函数调用栈时延结果 ++ + process_sched_info :进程的调度信息 ++ + summary_delay.csv : 时延分析总结报告 ++ + summary_sched.csv : 调度分析总结报告 ++ ++#### 终端输出结果介绍 + ``` +-采集后会在`/var/da-tool/tmp`文件夹下生成一个`output_时间`格式的文件夹,包含此次采样的结果。 +-分析完成后会在`/var/da-tool/analysis-output/ouput/`下生成分析结果 ++├──pid: 222459{local:(450040, 44.988%), global:(1000346, 100.000%)} ++│ ├─────sched_switch{local:(13160, 1.316%, 453.793), global:(13160, 1.316%, 453.793), times:29, (int)ret>=0 times:29} ++│ └─────u0_recvfrom{local:(422312, 42.217%, 10.729), global:(537146, 53.696%, 13.646), times:39362, (int)ret>=0 times:20} ++│ ├─────sched_switch{local:(2927, 0.293%, 209.071), global:(2927, 0.293%, 209.071), times:14, (int)ret>=0 times:14} ++│ └─────sock_recvmsg{local:(55313, 5.529%, 1.405), global:(111907, 11.187%, 2.843), times:39362, (int)ret>=0 times:20} ++│ └─────udp_recvmsg{local:(36357, 3.634%, 0.924), global:(56594, 5.657%, 1.438), times:39362, (int)ret>=0 times:20} ++│ └─────__skb_recv_udp{local:(20237, 2.023%, 0.514), global:(20237, 2.023%, 0.514), times:39362, (int)ret>=0 times:39342} + + ``` +-├──pid:1473358{(868869,100.000%)} +-│ │ ├─────u0__Z5func1v{(local: 19, 0.002%, 
19.000)(global:150399, 17.310% ,150399.000), times:1, (int)ret>=0 times:1} +-│ │ │ ├─────sched_switch{(local: 150380, 17.308%, 150380.000)(global:150380, 17.308% ,150380.000), times:1, (int)ret>=0 times:1} ++以此结果为例,该进程是一个udp非阻塞收包进程。 +++ `u0_recvfrom` 为该进程在运行后执行的用户态函数,`u0_` 前缀表示第一个应用程序的函数,实际函数名为`recvfrom`;`sched_switch` 为调度函数;其余函数为内核函数 +++ `global` 和 `local` 对应的括号内为该函数执行的时延信息,其中 `local` 是剔除掉子函数和调度所执行的时间 , `global` 为该函数实际执行时长 +++ 每个函数的 `global` 和 `local` 的括号内三个信息分别为,时延,该时延所占进程全部时间的百分比,平均时延(时延/执行次数) +++ 每一级函数的 `global` 时间等于本级函数`local`时间与下一级所有函数的 `global` 时间之和 +++ `times` 是该函数调用栈的次数, +++ `(int)ret>=0 times`:表示该函数返回值转换为`int`后大于等于0的次数,无返回值函数返回值是无效的值 +++ 以上时间单位为微秒 ++ ++#### 文件夹结果介绍 ++ ++**时延和调用关系信息**:`/var/da-tool/analysis_output/output/func_delay_stack` ++``` ++# 此部分信息为终端输出结果的文件格式存储 ++pid_222459;sched_switch 13160, localDelaySum ,13160, localAvedelay ,453.793103, localPercentage, 1.316%, globalDelaySum ,13160, globalAvedelay, 453.793103, globalPercentage, 1.316%, times , 29, (int)ret>=0 times ,29 ++pid_222459;u0_recvfrom;sched_switch 2927, localDelaySum ,2927, localAvedelay ,209.071429, localPercentage, 0.293%, globalDelaySum ,2927, globalAvedelay, 209.071429, globalPercentage, 0.293%, times , 14, (int)ret>=0 times ,14 ++pid_222459;u0_recvfrom 422312, localDelaySum ,422312, localAvedelay ,10.728926, localPercentage, 42.217%, globalDelaySum ,537146, globalAvedelay, 13.646309, globalPercentage, 53.696%, times ,39362, (int)ret>=0 times ,20 ++pid_222459;u0_recvfrom;sock_recvmsg 55313, localDelaySum ,55313, localAvedelay ,1.405239, localPercentage, 5.529%, globalDelaySum ,111907, globalAvedelay, 2.843021, globalPercentage, 11.187%, times ,39362, (int)ret>=0 times ,20 ++pid_222459;u0_recvfrom;sock_recvmsg;udp_recvmsg 36357, localDelaySum ,36357, localAvedelay ,0.923657, localPercentage, 3.634%, globalDelaySum ,56594, globalAvedelay, 1.437783, globalPercentage, 5.657%, times ,39362, (int)ret>=0 times ,20 ++pid_222459;u0_recvfrom;sock_recvmsg;udp_recvmsg;__skb_recv_udp 20237, 
localDelaySum ,20237, localAvedelay ,0.514125, localPercentage, 2.023%, globalDelaySum ,20237, globalAvedelay, 0.514125, globalPercentage, 2.023%, times ,39362, (int)ret>=0 times ,39342 + ``` +-以此结果为例,`u0__Z5func1v` 和 `sched_switch` 为 该进程在运行期间执行的函数,`sched_switch`执行周期在 `u0__Z5func1v` 周期内,`(local: 19, 0.002%, 19.000)` 表示该函数剔除子函数和调度所执行的时间,三个参数分别为,总时间、所占整个pid有效时间的百分比,平均时间,`global` 为不剔除子函数所占的时间,`times` 是该函数调用栈的次数,`(int)ret>=0 times`:表示该函数返回值转换为(int)后大于等于0的次数,无返回值函数返回值认为是0. +-以上时间单位为微秒。 +-## 注意 +-+ 确保trace 中有需要采集的函数的完整调用栈 +-+ 采样时间和函数需要人为评估,某些函数短时间可能采到大量trace,日志过大,解析过慢 ++ ++**调度信息**:`/var/da-tool/analysis_output/output/process_sched_info` ++``` ++# delaySum : 该pid分析的总时长 ++# schedSwitchDelay : 调度所占的时间 ++# runtime :delaySum - schedSwitchDelay ++# cpuSwitchTimes : 该pid从一个核切换到另一个核的次数 ++# core 2, run time 704927 : 表示在cpu2 上运行时长为 704927 ++# startTime,67551.691078,endTime,67551.701193,coreIndex,2 :在这个时间段内在cpu2上运行 ++# coreIndex,-1 表示该pid被切走的时长(sched_switch) ++ ++pid,222459,delaySum ,1000368,schedSwitchDelay ,37201,runtime ,963167,cpuSwitchTimes ,1, ++ core 2, run time 704927 ++ core 3, run time 258240 ++startTime,67551.691078,endTime,67551.701193,coreIndex,2 ++startTime,67551.701193,endTime,67551.701970,coreIndex,-1 ++startTime,67551.701970,endTime,67551.702503,coreIndex,2 ++startTime,67551.702503,endTime,67551.713700,coreIndex,-1 ++startTime,67551.713700,endTime,67551.723964,coreIndex,2 ++startTime,67551.723964,endTime,67551.724119,coreIndex,-1 ++... 
++ ++``` ++**时延分析总结报告**:`/var/da-tool/analysis_output/output/summary_delay.csv` ++ ++包含信息如下,其中`(r>=0)`表示函数返回值转成`int`后大于等于0的情况。 ++`ave,sum,min,max,p50,p80,p95,p99`等为时延信息的平均值、总和、极值、各百分位下的数值。 ++``` ++pid,function,call_times,ave,sum,min,max,p50,p80,p95,p99, ++call_times(r>=0),ave(r>=0),sum(r>=0),min(r>=0),max(r>=0),p50(r>=0),p80(r>=0),p95(r>=0),p99(r>=0), ++call_times(r<0),ave(r<0),sum(r<0),min(r<0),max(r<0),p50(r<0),p80(r<0),p95(r<0),p99(r<0), ++``` ++ ++**调度分析总结报告**:`/var/da-tool/analysis_output/output/summary_sched.csv` ++``` ++pid,delaySum,schedSwitchDelay,schedSwitchPercentage,schedSwitchTimes,cpuSwitchTimes ++``` +++ delaySum : 总耗时 +++ schedSwitchDelay : 调度总耗时 +++ schedSwitchPercentage : schedSwitchDelay 占 delaySum 的百分比 +++ schedSwitchTimes : 调度次数 +++ cpuSwitchTimes : cpu 切换次数 + + ### 扩展功能 +-da-tool 生成的结果信息可调用 火焰图生成工具,可视化分析结果, +-`./flamegraph.pl` 可在 https://gitee.com/mirrors/FlameGraph 中获取 ++`da-tool` 生成的结果信息可调用 火焰图生成工具,可视化分析结果, ++`./flamegraph.pl` 可在 `https://gitee.com/mirrors/FlameGraph` 中获取 + ```shell + # 全部信息 + cat /var/da-tool/analysis_output/output/func_delay_stack | grep -o '^[^,]*' | ./flamegraph.pl --countname "delay sum" > allpid.svg +@@ -118,15 +199,20 @@ Error: event "aaaa" already exists. + Error: Failed to add events. 
+ ``` + ++采集后会在`/var/da-tool/tmp`文件夹下生成一个`output_时间`格式的文件夹,包含此次采样的结果。 ++采样脚本的采样日志在此路径下: ++``` ++/var/da-tool/tmp/sample_output_时间/da-tool/sample.log ++``` ++ + ### 注意事项 + + 配置注意事项 + + 配置`/etc/da-tool.conf` 时,注意所配置符号一定存在 +- + 内核符号可在`/proc/kallsyms` 中查看,用户态程序符号 可用`objdump -d 二进制 | grep 函数名` 匹配正确的符号 +- + 某些函数名可能存在点(eg:A.B.C),暂**不支持配置此类函数**,例如经过gcc优化选项`-fipa-sra`优化后,函数符号后缀会有`.rsra.num`。 ++ + 某些函数名可能存在点(eg:A.B.C),暂**不支持配置此类函数**,例如经过gcc优化选项`-fipa-sra`优化后,函数符号后缀会有`.isra.num`。 ++ + 应用程序名也不要有点,建议函数和应用程序**不要包含特殊符号** + + 某些函数可能短时间执行大量次数,此时`trace`很大,解析时间会很长,需要认为评估配置的函数运行情况,合理设置采样时间 + + 由于`trace`可能存在不完整的调用关系,很有可能在`trace`中存在的数据分析时舍弃,如果单次采样没有抓到需要的pid信息,建议多采样几次 +- + 有时`trace`中会有数据丢失,结果可能异常,常见的异常原因为`trace`过大,内核刷新数据时不正常,建议减小采样时间。 ++ + 有时`trace`中会有数据丢失,结果可能异常,常见的异常原因为`trace`过大,内核刷新数据时不正常,比如会出现同一个函数只有返回时间没有进入时间的现象,建议减小采样时间。 + + 不支持递归函数 +- +- +- +++ 本工具单个CPU所使用的跟踪缓存`RingBuffer`大小为 `40960kb` ,当单核的`trace`大小接近此值时数据可能异常,进而导致分析结果错误。 +++ 确保`trace` 中有需要采集的函数的完整调用栈 +diff --git a/extra-tools/da-tool/analysis/config.cpp b/extra-tools/da-tool/analysis/config.cpp +index ecbae2d..08420ad 100644 +--- a/extra-tools/da-tool/analysis/config.cpp ++++ b/extra-tools/da-tool/analysis/config.cpp +@@ -38,15 +38,16 @@ void Config::pathInit() + filename[FILE_TYPE_FUNC_CFG] = pathInput + "/analysis_config"; + + // output +- filename[FILE_TYPE_OUTPUT_DELAY] = pathOutput + "/delay.csv"; ++ filename[FILE_TYPE_OUTPUT_DELAY] = pathOutput + "/summary_delay.csv"; + filename[FILE_TYPE_OUTPUT_FUNC_STACK_DELALY] = pathOutput + "/func_delay_stack"; + filename[FILE_TYPE_OUTPUT_PROCESS_SCHED_INFO] = pathOutput + "/process_sched_info"; ++ filename[FILE_TYPE_OUTPUT_SUMMARY_SCHED_INFO] = pathOutput + "/summary_sched.csv"; + + // debug + filename[FILE_TYPE_OUTPUT_RUN_LOG] = pathOutputDebug + "/run.log"; + filename[FILE_TYPE_OUTPUT_FUNC_STACK_ALL_INFO] = pathOutput + "/func_stack_all_info"; +- filename[FILE_TYPE_DEBUG_TIME_PAIE] = pathOutputDebug + "/debug_time_pair.csv"; +- filename[FILE_TYPE_DEBUG_TRACE] = pathOutputDebug + 
"/debug_trace.csv"; ++ filename[FILE_TYPE_DEBUG_TIME_PAIE] = pathOutputDebug + "/debug_time_pair"; ++ filename[FILE_TYPE_DEBUG_TRACE] = pathOutputDebug + "/debug_trace"; + filename[FILE_TYPE_DEBUG_FUNC_STACK_TRACE] = pathOutputDebug + "/debug_funcstk_trace"; + filename[FILE_TYPE_DEBUG_REGEX] = pathOutputDebug + "/debug_resolve_function_trace"; + filename[FILE_TYPE_DEBUG_CONFIG] = pathOutputDebug + "/debug_config_resolve"; +@@ -161,7 +162,7 @@ void Config::configInit(int argc, char *argv[]) + case 'g': + if (std::stoi(optarg) < DEBUG_LEVEL_MAX) { + debugLevel = (DEBUG_LEVEL_E)std::stoi(optarg); +- } else { ++ } else { + std::cout << "debugLevel error" << std::endl; + } + std::cout << "debugLevel : " << debugLevel << std::endl; +@@ -172,7 +173,7 @@ void Config::configInit(int argc, char *argv[]) + default: + std::cout << "Unrecognized option" << std::endl; + break; +- } ++ } + } + + for (int i = optind; i < argc; ++i) { +diff --git a/extra-tools/da-tool/analysis/config.h b/extra-tools/da-tool/analysis/config.h +index 53b20dd..ccce0f2 100644 +--- a/extra-tools/da-tool/analysis/config.h ++++ b/extra-tools/da-tool/analysis/config.h +@@ -28,6 +28,7 @@ typedef enum { + FILE_TYPE_OUTPUT_FUNC_STACK_DELALY, + FILE_TYPE_OUTPUT_FUNC_STACK_ALL_INFO, + FILE_TYPE_OUTPUT_PROCESS_SCHED_INFO, ++ FILE_TYPE_OUTPUT_SUMMARY_SCHED_INFO, + + // debug 1 + FILE_TYPE_DEBUG_CONFIG, +@@ -36,10 +37,11 @@ typedef enum { + // debug 3 + FILE_TYPE_DEBUG_TRACE, + FILE_TYPE_DEBUG_REGEX, +- FILE_TYPE_DEBUG_FUNC_STACK_TRACE, + FILE_TYPE_DEBUG_TIME_PAIR_ALIGN, + FILE_TYPE_DEBUG_TIME_PAIE, + FILE_TYPE_DEBUG_DELAY_FUNC_STACK_TRACE, ++ // debug 4 ++ FILE_TYPE_DEBUG_FUNC_STACK_TRACE, + FILE_TYPE_MAX, + } FILE_TYPE_E; + +diff --git a/extra-tools/da-tool/analysis/function_stack.cpp b/extra-tools/da-tool/analysis/function_stack.cpp +index 3841627..fcc2a8d 100644 +--- a/extra-tools/da-tool/analysis/function_stack.cpp ++++ b/extra-tools/da-tool/analysis/function_stack.cpp +@@ -89,7 +89,7 @@ void 
FunctionStack::stackMapInit() + int maxDelay = 0; + + for (int i = 0; i < len; i++) { +- if (funcInfo.second.isInvalid[i] == 1 || delayMap[pid][functionIndex].isStackFinish[i] == true) { ++ if (funcInfo.second.isInvalid[i] == true || delayMap[pid][functionIndex].isStackFinish[i] == true) { + if (cfg.getDebugLevel() >= DEBUG_LEVEL_4) { + debugFile << "pid," << pid << ",functionIndex," << functionIndex << ",invalid" << std::endl; + } +@@ -106,7 +106,7 @@ void FunctionStack::stackMapInit() + } + + // The time pair has already been calculated, skip next time +- delayMap[pid][functionIndex].isStackFinish[i] = true; ++ delayMap[pid][functionIndex].isStackFinish[i] = true; + + std::string strFunctionStk = funcInfo.second.strFunctionStk[i]; + int fatherFunction = funcInfo.second.fatherFunction[i]; +@@ -170,13 +170,22 @@ void FunctionStack::saveFunctionStackToFile() + } + + for (const auto &processInfo : funcStackMap) { +- ++ int pid = processInfo.first; ++ if (cfg.filterCfgMap.size() != 0 && cfg.filterCfgMap.count(pid) == 0) { ++ continue; ++ } ++ if (processDelayMap[pid].delaySum[FS_DELAY_TYPE_GLOBAL] <= 0) { ++ continue; ++ } ++ file << "pid_" + std::to_string(pid); ++ file << "; " << processDelayMap[pid].delaySum[FS_DELAY_TYPE_LOCAL]; // for flame graph ++ file << ",localDelaySum," << processDelayMap[pid].delaySum[FS_DELAY_TYPE_LOCAL]; ++ file << ",localPercentage," << std::fixed << std::setprecision(3) << processDelayMap[pid].percentage[FS_DELAY_TYPE_LOCAL] * 100 << "%"; ++ file << ",globalDelaySum," << processDelayMap[pid].delaySum[FS_DELAY_TYPE_GLOBAL]; ++ file << ",globalPercentage," << std::fixed << std::setprecision(3) << processDelayMap[pid].percentage[FS_DELAY_TYPE_GLOBAL] * 100 << "%"; ++ file << std::endl; + for (const auto &stkInfo : processInfo.second) { +- int pid = processInfo.first; +- if (cfg.filterCfgMap.size() != 0 && cfg.filterCfgMap.count(pid) == 0) { +- continue; +- +- } ++ + file << "pid_" + std::to_string(pid); + std::stringstream 
ss(stkInfo.first); + std::string token; +@@ -188,14 +197,14 @@ void FunctionStack::saveFunctionStackToFile() + } + + file << " " << stkInfo.second.delaySum[FS_DELAY_TYPE_LOCAL]; // for flame graph +- file << ", localDelaySum ," << stkInfo.second.delaySum[FS_DELAY_TYPE_LOCAL]; +- file << ", localAvedelay ," << std::fixed << std::setprecision(6) << stkInfo.second.aveDelay[FS_DELAY_TYPE_LOCAL]; +- file << ", localPercentage, " << std::fixed << std::setprecision(3) << stkInfo.second.percentage[FS_DELAY_TYPE_LOCAL] * 100 << "%"; +- file << ", globalDelaySum ," << stkInfo.second.delaySum[FS_DELAY_TYPE_GLOBAL]; +- file << ", globalAvedelay, " << std::fixed << std::setprecision(6) << stkInfo.second.aveDelay[FS_DELAY_TYPE_GLOBAL]; +- file << ", globalPercentage, " << std::fixed << std::setprecision(3) << stkInfo.second.percentage[FS_DELAY_TYPE_GLOBAL] * 100 << "%"; +- file << ", times ," << std::setw(5) << std::setfill(' ') << stkInfo.second.num; +- file << ", (int)ret>=0 times ," << stkInfo.second.num - stkInfo.second.retValLessZeroTimes; ++ file << ",localDelaySum," << stkInfo.second.delaySum[FS_DELAY_TYPE_LOCAL]; ++ file << ",localAvedelay," << std::fixed << std::setprecision(6) << stkInfo.second.aveDelay[FS_DELAY_TYPE_LOCAL]; ++ file << ",localPercentage," << std::fixed << std::setprecision(3) << stkInfo.second.percentage[FS_DELAY_TYPE_LOCAL] * 100 << "%"; ++ file << ",globalDelaySum," << stkInfo.second.delaySum[FS_DELAY_TYPE_GLOBAL]; ++ file << ",globalAvedelay," << std::fixed << std::setprecision(6) << stkInfo.second.aveDelay[FS_DELAY_TYPE_GLOBAL]; ++ file << ",globalPercentage," << std::fixed << std::setprecision(3) << stkInfo.second.percentage[FS_DELAY_TYPE_GLOBAL] * 100 << "%"; ++ file << ",times ," << std::setw(5) << std::setfill(' ') << stkInfo.second.num; ++ file << ",(int)ret>=0 times," << stkInfo.second.num - stkInfo.second.retValLessZeroTimes; + + file << std::endl; + } +@@ -209,11 +218,7 @@ std::string getFatherFuncStk(const std::string &strFunctionStk) + { 
+ size_t lastDotPos = strFunctionStk.find_last_of('.'); + if (lastDotPos != std::string::npos) { +- if (lastDotPos == 0) { +- return ".0"; +- } else { +- return strFunctionStk.substr(0, lastDotPos); +- } ++ return strFunctionStk.substr(0, lastDotPos); + } else { + return ""; + } +@@ -221,7 +226,6 @@ std::string getFatherFuncStk(const std::string &strFunctionStk) + + void FunctionStack::stackNodeMapInit() + { +- + for (const auto &processInfo : funcStackMap) { + int pid = processInfo.first; + if (stackNodeMap.count(pid) == 0) { +@@ -230,11 +234,7 @@ void FunctionStack::stackNodeMapInit() + } + + for (const auto &stkInfo : processInfo.second) { +- std::string strFunctionStk = stkInfo.first; +- if (stackNodeMap[pid].count(strFunctionStk) != 0) { +- StackNode node_tmp; +- stackNodeMap[pid].emplace(strFunctionStk, node_tmp); +- } ++ std::string strFunctionStk = ".0" + stkInfo.first; + int func_index_tmp = 0; + std::stringstream ss(strFunctionStk); + std::string token; +@@ -243,67 +243,111 @@ void FunctionStack::stackNodeMapInit() + func_index_tmp = std::stoi(token); + } + } +- stackNodeMap[pid][strFunctionStk].functionIndex = func_index_tmp; ++ + std::string fatherFuncStk = getFatherFuncStk(strFunctionStk); ++ StackNode node_tmp; ++ if (stackNodeMap[pid].count(strFunctionStk) == 0) { ++ stackNodeMap[pid].emplace(strFunctionStk, node_tmp); ++ } ++ if (stackNodeMap[pid].count(fatherFuncStk) == 0) { ++ stackNodeMap[pid].emplace(fatherFuncStk, node_tmp); ++ } ++ stackNodeMap[pid][strFunctionStk].functionIndex = func_index_tmp; + stackNodeMap[pid][fatherFuncStk].nextStack.emplace_back(strFunctionStk); + } + } + } + ++std::string removeRootStk(std::string strFunctionStk) ++{ ++ return strFunctionStk.substr(2); // remove ".0" ++} + +-void FunctionStack::stackNodeMapDfs(int pid, int functionIndex, std::string strFunctionStk, int space_len) ++void FunctionStack::stackNodeMapDfs(int pid, bool endFlag, std::string strFunctionStk, std::string headStr) + { ++ std::string headStrTmp 
= headStr; + Config &cfg = Config::getInstance(); +- TimePair &tpInst = TimePair::getInstance(); + if (strFunctionStk == ".0") { +- std::cout << "├──pid:" << pid; +- int pidDelay = tpInst.getProcessValidTime(pid); +- if (pidDelay > 0) { +- std::cout << "{(" << tpInst.getProcessValidTime(pid) << ",100.000%)}"; ++ std::cout << "├──pid: " << pid; ++ if (processDelayMap[pid].delaySum[FS_DELAY_TYPE_GLOBAL] > 0) { ++ std::cout << "{"; ++ std::cout << "local:(" << processDelayMap[pid].delaySum[FS_DELAY_TYPE_LOCAL] << ", "; ++ std::cout << std::fixed << std::setprecision(3) << processDelayMap[pid].percentage[FS_DELAY_TYPE_LOCAL] * 100 << "%)"; ++ std::cout << ", global:(" << processDelayMap[pid].delaySum[FS_DELAY_TYPE_GLOBAL] << ", "; ++ std::cout << std::fixed << std::setprecision(3) << processDelayMap[pid].percentage[FS_DELAY_TYPE_GLOBAL] * 100 << "%)"; ++ std::cout << "}"; + } else { + std::cout << " data invalid!!!"; + } +- + std::cout << std::endl; + } else { +- +- for (int i = 0; i < space_len; i++) { +- if (i % SPLIT_SPACE_LEN == 0) +- +- +- { +- std::cout << "│"; +- } ++ std::cout << "│"; ++ if (endFlag == false) { ++ headStrTmp += "│"; ++ } ++ for (int i = 1; i < SPLIT_SPACE_LEN; i++) { + std::cout << " "; ++ headStrTmp += " "; ++ } ++ std::cout << headStr; ++ if (endFlag == false) { ++ std::cout << "├─────"; ++ } else { ++ std::cout << "└─────"; + } +- std::cout << "├─────" << cfg.IndexToFunction[stackNodeMap[pid][strFunctionStk].functionIndex]; ++ ++ std::cout << cfg.IndexToFunction[stackNodeMap[pid][strFunctionStk].functionIndex]; + std::cout << "{"; +- std::cout << "(local: " << funcStackMap[pid][strFunctionStk].delaySum[FS_DELAY_TYPE_LOCAL] << ", " << std::fixed << std::setprecision(3) << funcStackMap[pid][strFunctionStk].percentage[FS_DELAY_TYPE_LOCAL] * 100 << "%, " << funcStackMap[pid][strFunctionStk].aveDelay[FS_DELAY_TYPE_LOCAL] << ")"; +- std::cout << "(global:" << funcStackMap[pid][strFunctionStk].delaySum[FS_DELAY_TYPE_GLOBAL] << ", " << std::fixed << 
std::setprecision(3) << funcStackMap[pid][strFunctionStk].percentage[FS_DELAY_TYPE_GLOBAL] * 100 << "% ," << funcStackMap[pid][strFunctionStk].aveDelay[FS_DELAY_TYPE_GLOBAL] << ")"; +- std::cout << ", times:" << funcStackMap[pid][strFunctionStk].num; +- std::cout << ", (int)ret>=0 times:" << funcStackMap[pid][strFunctionStk].num - funcStackMap[pid][strFunctionStk].retValLessZeroTimes; ++ std::string noRootStk = removeRootStk(strFunctionStk); ++ std::cout << "local:(" << funcStackMap[pid][noRootStk].delaySum[FS_DELAY_TYPE_LOCAL] << ", "; ++ std::cout << std::fixed << std::setprecision(3) << funcStackMap[pid][noRootStk].percentage[FS_DELAY_TYPE_LOCAL] * 100 << "%, "; ++ std::cout << funcStackMap[pid][noRootStk].aveDelay[FS_DELAY_TYPE_LOCAL] << ")"; ++ std::cout << ", global:(" << funcStackMap[pid][noRootStk].delaySum[FS_DELAY_TYPE_GLOBAL] << ", "; ++ std::cout << std::fixed << std::setprecision(3) << funcStackMap[pid][noRootStk].percentage[FS_DELAY_TYPE_GLOBAL] * 100 << "%, "; ++ std::cout << funcStackMap[pid][noRootStk].aveDelay[FS_DELAY_TYPE_GLOBAL] << ")"; ++ std::cout << ", times:" << funcStackMap[pid][noRootStk].num; ++ std::cout << ", (int)ret>=0 times:" << funcStackMap[pid][noRootStk].num - funcStackMap[pid][noRootStk].retValLessZeroTimes; + std::cout << "}" << std::endl; + } + +- for (const auto &nextStack : stackNodeMap[pid][strFunctionStk].nextStack) { +- stackNodeMapDfs(pid, stackNodeMap[pid][strFunctionStk].functionIndex, nextStack, space_len + SPLIT_SPACE_LEN); ++ int len = stackNodeMap[pid][strFunctionStk].nextStack.size(); ++ for (int i = 0; i < len; i++) { ++ stackNodeMapDfs(pid, i == (len - 1), stackNodeMap[pid][strFunctionStk].nextStack[i], headStrTmp); + } ++} + ++void FunctionStack::processDelayAnalysis() ++{ ++ TimePair &tpInst = TimePair::getInstance(); ++ for (const auto &processInfo : stackNodeMap) { ++ int pid = processInfo.first; ++ if (processDelayMap.count(pid) == 0) { ++ ProcessDelay delaytmp; ++ processDelayMap.emplace(pid, delaytmp); ++ 
} ++ processDelayMap[pid].delaySum[FS_DELAY_TYPE_LOCAL] = tpInst.getProcessValidTime(pid); ++ processDelayMap[pid].delaySum[FS_DELAY_TYPE_GLOBAL] = tpInst.getProcessValidTime(pid); ++ for (const auto &firstStack : stackNodeMap[pid][".0"].nextStack) { ++ std::string noRootStk = removeRootStk(firstStack); ++ processDelayMap[pid].delaySum[FS_DELAY_TYPE_LOCAL] -= funcStackMap[pid][noRootStk].delaySum[FS_DELAY_TYPE_GLOBAL]; ++ ++ } ++ processDelayMap[pid].percentage[FS_DELAY_TYPE_LOCAL] = \ ++ processDelayMap[pid].delaySum[FS_DELAY_TYPE_LOCAL] * 1.0 / processDelayMap[pid].delaySum[FS_DELAY_TYPE_GLOBAL]; ++ processDelayMap[pid].percentage[FS_DELAY_TYPE_GLOBAL] = 1.0; ++ } + } + + void FunctionStack::stackNodeMapDisplay() + { + Config &cfg = Config::getInstance(); + std::cout << "Display the function delay of each pid " << std::endl; +- // std::cout << "format:function symbol{( delay sum (microsecond) ,percentage(occupy the entire pid runtime) ),average delay | num in trace}" << std::endl; + for (const auto &processInfo : stackNodeMap) { + int pid = processInfo.first; + if (cfg.filterCfgMap.size() == 0 || cfg.filterCfgMap.count(pid) != 0) { +- std::cout << "───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────" << std::endl; +- stackNodeMapDfs(processInfo.first, 0, ".0", SPLIT_SPACE_LEN); +- std::cout << "───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────" << std::endl; ++ std::cout << "│" << std::endl; ++ stackNodeMapDfs(processInfo.first, 0, ".0", ""); + } +- + } + std::cout << std::endl; + } +@@ -313,8 +357,9 @@ void FunctionStack::function_stack_proc() + delayMapInit(); + stackMapInit(); + stackMapAnalysis(); +- saveFunctionStackToFile(); + + stackNodeMapInit(); ++ processDelayAnalysis(); + stackNodeMapDisplay(); ++ saveFunctionStackToFile(); + } +\ No newline at end of file +diff --git 
a/extra-tools/da-tool/analysis/function_stack.h b/extra-tools/da-tool/analysis/function_stack.h +index 34781c3..9f6e2d7 100644 +--- a/extra-tools/da-tool/analysis/function_stack.h ++++ b/extra-tools/da-tool/analysis/function_stack.h +@@ -52,8 +52,15 @@ public: + std::vector delay; + }; + ++class ProcessDelay { ++public: ++ int delaySum[FS_DELAY_TYPE_MAX]; ++ double percentage[FS_DELAY_TYPE_MAX]; ++}; ++ + class StackNode { + public: ++ // if stk .0.1.2.3 , then .0.1's nextStack is .0.1.2 , functionIndex is 1 + int functionIndex; + std::vector nextStack; + }; +@@ -70,24 +77,24 @@ public: + ~FunctionStack() {} + + private: ++ std::unordered_map processDelayMap; + std::unordered_map> + funcStackMap; // [pid][strFunctionStk] + std::unordered_map> + delayMap; // [pid][functionIndex] , copy from trace_reslove + void delayMapInit(); + void stackMapInit(); ++ void processDelayAnalysis(); + + void stackMapAnalysis(); + void saveFunctionStackToFile(); + + private: // stack node + std::unordered_map> +- stackNodeMap; // [pid][strFunctionStk] ++ stackNodeMap; // [pid][".0" + strFunctionStk] + void stackNodeMapInit(); + void stackNodeMapDisplay(); +- void stackNodeMapDfs(int pid, int functionIndex, std::string strFunctionStk, +- int space_len); +- ++ void stackNodeMapDfs(int pid, bool endFlag, std::string strFunctionStk, std::string headStr); + public: + void function_stack_proc(); + }; +diff --git a/extra-tools/da-tool/analysis/sched_analysis.cpp b/extra-tools/da-tool/analysis/sched_analysis.cpp +index ba8c49f..aea3d25 100644 +--- a/extra-tools/da-tool/analysis/sched_analysis.cpp ++++ b/extra-tools/da-tool/analysis/sched_analysis.cpp +@@ -24,47 +24,21 @@ SchedAnalysis::SchedAnalysis() + + } + +-void SchedAnalysis::processSchedAnalysisLoop(const int &pid, const int ×tamp, const int &coreIndex, LINE_TYPE_E lineType) ++void SchedAnalysis::processSchedAnalysisLoop(const int &pid, const int ×tamp, const int &coreIndex) + { + if (processSchedMap.count(pid) != 0) { + ProcessSchedInfo 
tmp; + processSchedMap.emplace(pid, tmp); + } + int size = processSchedMap[pid].coreTrace.size(); +- if (lineType == LINE_TYPE_FUNC) { +- if (size == 0) { +- ProcessCoreTrace pct; +- pct.startTime = timestamp; +- pct.endTime = 0; +- pct.coreIndex = coreIndex; +- processSchedMap[pid].coreTrace.emplace_back(pct); +- } else { +- processSchedMap[pid].coreTrace[size - 1].endTime = timestamp; +- } +- +- } else if (lineType == LINE_TYPE_SCHED_SWITCH) // pid1 - > pidn +- { +- if (size != 0) { +- processSchedMap[pid].coreTrace[size - 1].endTime = timestamp; +- } +- ProcessCoreTrace pct; +- pct.startTime = timestamp; +- pct.endTime = timestamp; +- pct.coreIndex = -1; +- processSchedMap[pid].coreTrace.emplace_back(pct); +- +- } else if (lineType == LINE_TYPE_SCHED_SWITCH_RET) // pidm - > pid1 +- { +- if (size != 0) { +- processSchedMap[pid].coreTrace[size - 1].endTime = timestamp; +- } +- ProcessCoreTrace pct; +- pct.startTime = timestamp; +- pct.endTime = timestamp; +- pct.coreIndex = coreIndex; +- processSchedMap[pid].coreTrace.emplace_back(pct); ++ ProcessCoreTrace pct; ++ pct.startTime = timestamp; ++ pct.endTime = timestamp; ++ pct.coreIndex = coreIndex; ++ if (size != 0) { ++ processSchedMap[pid].coreTrace[size - 1].endTime = timestamp; + } +- ++ processSchedMap[pid].coreTrace.emplace_back(pct); + } + + void SchedAnalysis::schedInfoProc() +@@ -82,19 +56,17 @@ void SchedAnalysis::schedInfoProc() + for (const auto &line_info_tmp : slv.getTraceLine()) { + std::string functionName = line_info_tmp.functionName; + int pid = line_info_tmp.pid; +- if (cfg.funcCfgMap.count(functionName) == 0 || pid == 0) { ++ if (cfg.funcCfgMap.count(functionName) == 0) { + continue; + } + int timestamp = line_info_tmp.timestamp; + int coreIndex = line_info_tmp.core; + int functionIndex = cfg.funcCfgMap[functionName].functionIndex; + +- if (functionIndex != sched_switch_funcidx) { +- processSchedAnalysisLoop(pid, timestamp, coreIndex, LINE_TYPE_FUNC); +- } else { ++ if (functionIndex == 
sched_switch_funcidx) { + int nextPid = line_info_tmp.schedSwitchLine.nextPid; +- processSchedAnalysisLoop(pid, timestamp, coreIndex, LINE_TYPE_SCHED_SWITCH); +- processSchedAnalysisLoop(nextPid, timestamp, coreIndex, LINE_TYPE_SCHED_SWITCH_RET); ++ processSchedAnalysisLoop(pid, timestamp, -1); // pid1 - > pidn ++ processSchedAnalysisLoop(nextPid, timestamp, coreIndex); // pidm - > pid1 + } + } + } +@@ -105,13 +77,15 @@ void SchedAnalysis::schedInfoAnalysis() + int lastCoreIndex = -1; + int delaySum = 0; + int delaySched = 0; +- int cpuSwichTimes = 0; ++ int schedSwitchTimes = 0; ++ int cpuSwitchTimes = 0; + for (auto &coreTrace : sched_tmp.second.coreTrace) { + int delay = coreTrace.endTime - coreTrace.startTime; + int coreIndex = coreTrace.coreIndex; + delaySum += delay; + if (coreIndex == -1) { + delaySched += delay; ++ schedSwitchTimes++; + } else { + sched_tmp.second.runTimeOfCore[coreIndex] += delay; + } +@@ -121,13 +95,14 @@ void SchedAnalysis::schedInfoAnalysis() + } + + if (lastCoreIndex != coreIndex && coreIndex != -1) { +- cpuSwichTimes++; ++ cpuSwitchTimes++; + lastCoreIndex = coreTrace.coreIndex; + } +- + } + sched_tmp.second.schedSwitchDelay = delaySched; +- sched_tmp.second.cpuSwichTimes = cpuSwichTimes; ++ sched_tmp.second.schedSwitchTimes = schedSwitchTimes; ++ sched_tmp.second.percentageSchedSwitch = delaySum == 0? 
0.0 : delaySched * 1.0 / delaySum; ++ sched_tmp.second.cpuSwitchTimes = cpuSwitchTimes; + sched_tmp.second.delaySum = delaySum; + } + } +@@ -147,10 +122,10 @@ void SchedAnalysis::saveSchedInfoToFile() + continue; + } + file << "pid," << pid << ","; +- file << "delaySum ," << sched_tmp.second.delaySum << ","; +- file << "schedSwitchDelay ," << sched_tmp.second.schedSwitchDelay << ","; +- file << "runtime ," << sched_tmp.second.delaySum - sched_tmp.second.schedSwitchDelay << ","; +- file << "cpuSwichTimes ," << sched_tmp.second.cpuSwichTimes << ","; ++ file << "delaySum," << sched_tmp.second.delaySum << ","; ++ file << "schedSwitchDelay," << sched_tmp.second.schedSwitchDelay << ","; ++ file << "runtime," << sched_tmp.second.delaySum - sched_tmp.second.schedSwitchDelay << ","; ++ file << "cpuSwitchTimes," << sched_tmp.second.cpuSwitchTimes << ","; + file << std::endl; + for (int coreIndex = 0; coreIndex < sched_tmp.second.runTimeOfCore.size(); coreIndex++) { + int run_time = sched_tmp.second.runTimeOfCore[coreIndex]; +@@ -171,9 +146,38 @@ void SchedAnalysis::saveSchedInfoToFile() + file.close(); + } + ++void SchedAnalysis::saveSchedInfoSummaryToFile() ++{ ++ Config &cfg = Config::getInstance(); ++ std::ofstream file(cfg.filename[FILE_TYPE_OUTPUT_SUMMARY_SCHED_INFO], std::ios::out | std::ios::trunc); ++ if (!file) { ++ std::cout << "file open failed:" << cfg.filename[FILE_TYPE_OUTPUT_SUMMARY_SCHED_INFO] << std::endl; ++ return; ++ } ++ file << "pid,delaySum,schedSwitchDelay,schedSwitchPercentage,schedSwitchTimes,cpuSwitchTimes"; ++ file << std::endl; ++ TraceResolve &slv = TraceResolve::getInstance(); ++ for (const auto &sched_tmp : processSchedMap) { ++ int pid = sched_tmp.first; ++ if (pid == 0) { ++ continue; ++ } ++ file << pid << ","; ++ file << sched_tmp.second.delaySum << ","; ++ file << sched_tmp.second.schedSwitchDelay << ","; ++ file << std::fixed << std::setprecision(3) << sched_tmp.second.percentageSchedSwitch * 100 << "%,"; ++ file << 
sched_tmp.second.schedSwitchTimes << ","; ++ file << sched_tmp.second.cpuSwitchTimes << ","; ++ file << std::endl; ++ } ++ ++ file.close(); ++} ++ + void SchedAnalysis::schedAnalysisProc() + { + schedInfoProc(); + schedInfoAnalysis(); + saveSchedInfoToFile(); ++ saveSchedInfoSummaryToFile(); + } +diff --git a/extra-tools/da-tool/analysis/sched_analysis.h b/extra-tools/da-tool/analysis/sched_analysis.h +index dd35764..85036ce 100644 +--- a/extra-tools/da-tool/analysis/sched_analysis.h ++++ b/extra-tools/da-tool/analysis/sched_analysis.h +@@ -34,7 +34,9 @@ public: + std::vector + coreTrace; // CPU information of pid in each time period + int schedSwitchDelay; +- int cpuSwichTimes; ++ int schedSwitchTimes; ++ double percentageSchedSwitch; ++ int cpuSwitchTimes; + int delaySum; + }; + +@@ -68,10 +70,11 @@ private: // process sched info + std::unordered_map processSchedMap; // [pid] + // std::vector > allCpuSchedInfo; // [coreIndex] + void processSchedAnalysisLoop(const int &pid, const int ×tamp, +- const int &coreIndex, LINE_TYPE_E line_type); ++ const int &coreIndex); + void schedInfoProc(); + void schedInfoAnalysis(); + void saveSchedInfoToFile(); ++ void saveSchedInfoSummaryToFile(); + + public: + void schedAnalysisProc(); +diff --git a/extra-tools/da-tool/analysis/time_pair.cpp b/extra-tools/da-tool/analysis/time_pair.cpp +index b9e16a6..037bce1 100644 +--- a/extra-tools/da-tool/analysis/time_pair.cpp ++++ b/extra-tools/da-tool/analysis/time_pair.cpp +@@ -48,6 +48,16 @@ typedef enum { + TRACE_INFO_SHCEMAX, + } TRACE_INFO_SCHED_SWITCH_E; + ++typedef enum { ++ DEBUG_POS_0, ++ DEBUG_POS_1, ++ DEBUG_POS_2, ++ DEBUG_POS_3, ++ DEBUG_POS_4, ++ DEBUG_POS_5, ++ DEBUG_POS_MAX, ++} DEBUG_POS_E; ++ + TimePair::TimePair() + { + } +@@ -91,38 +101,35 @@ void TimePair::saveFuncStkDebugToFile(std::ofstream &file, const int &pid, const + } + int TimePair::getFatherFunctionIdLoop(const int &pid, const int &functionIndex, const int &isRet, int &debugPos) + { +- debugPos = 0; ++ 
debugPos = DEBUG_POS_0; + +- if (funcStkMap.count(pid) == 0) +- { ++ if (funcStkMap.count(pid) == 0) { + std::vector tmp; + funcStkMap.emplace(pid, tmp); + } + +- if (funcStkMap[pid].size() == 0) +- { ++ if (funcStkMap[pid].size() == 0) { + if (isRet) // stk empty + { +- debugPos = 1; ++ debugPos = DEBUG_POS_1; + return 0; + } else { + funcStkMap[pid].emplace_back(functionIndex); +- debugPos = 2; ++ debugPos = DEBUG_POS_2; + } + } else { +- if (funcStkMap[pid][funcStkMap[pid].size() - 1] == functionIndex) // stk not empty +- { ++ if (funcStkMap[pid][funcStkMap[pid].size() - 1] == functionIndex) { // stk not empty + funcStkMap[pid].pop_back(); // match ,pop + if (funcStkMap[pid].size() > 0) { +- debugPos = 3; ++ debugPos = DEBUG_POS_3; + return funcStkMap[pid][funcStkMap[pid].size() - 1]; + } else { +- debugPos = 4; ++ debugPos = DEBUG_POS_4; + return 0; // can't find father function + } + } else { // function unmath , push + funcStkMap[pid].emplace_back(functionIndex); +- debugPos = 5; ++ debugPos = DEBUG_POS_5; + return funcStkMap[pid][funcStkMap[pid].size() - 2]; + } + } +@@ -140,18 +147,20 @@ void TimePair::timePairUpdateLoop(const int &pid, const int &functionIndex, cons + + if (timePairMap[pid].count(functionIndex) == 0) { + TimePairInfo infoTmp; ++ infoTmp.maxStartTimeInvaild = 0; ++ infoTmp.minEndTimeInvalid = INT_MAX; + timePairMap[pid].emplace(functionIndex, infoTmp); + } + + if (isRet) { +- if (timePairMap[pid][functionIndex].startTime.size() == 0) // fist is endtime ,startime=endtime +- { ++ if (timePairMap[pid][functionIndex].startTime.size() == 0) { // fist is endtime ,startime=endtime + timePairMap[pid][functionIndex].startTime.emplace_back(timestamp); + timePairMap[pid][functionIndex].childFuncTimes.emplace_back(0); + timePairMap[pid][functionIndex].strFunctionStk.emplace_back('.' 
+ std::to_string(functionIndex)); + timePairMap[pid][functionIndex].fatherFunction.emplace_back(0); + timePairMap[pid][functionIndex].fatherFuncPos.emplace_back(-1); +- timePairMap[pid][functionIndex].isInvalid.emplace_back(1); // only have retval , invalid ++ timePairMap[pid][functionIndex].isInvalid.emplace_back(true); // only have retval , invalid ++ timePairMap[pid][functionIndex].maxStartTimeInvaild = timestamp; + } // Be careful when adding else branches. Only when there is no exit at the entrance, you will not be able to enter else + timePairMap[pid][functionIndex].endTime.emplace_back(timestamp); + if (line_info_tmp.args.size() != 0) { +@@ -162,13 +171,10 @@ void TimePair::timePairUpdateLoop(const int &pid, const int &functionIndex, cons + } else { + timePairMap[pid][functionIndex].startTime.emplace_back(timestamp); + timePairMap[pid][functionIndex].childFuncTimes.emplace_back(0); +- + std::string father_func_stk = fatherFunction != 0 ? \ + timePairMap[pid][fatherFunction].strFunctionStk[timePairMap[pid][fatherFunction].strFunctionStk.size() - 1] : ""; +- + std::string strFunctionStk = father_func_stk + '.' 
+ std::to_string(functionIndex); + timePairMap[pid][functionIndex].strFunctionStk.emplace_back(strFunctionStk); +- + timePairMap[pid][functionIndex].fatherFunction.emplace_back(fatherFunction); + int fatherFuncPos = 0; + if (fatherFunction == 0) { +@@ -178,8 +184,7 @@ void TimePair::timePairUpdateLoop(const int &pid, const int &functionIndex, cons + timePairMap[pid][fatherFunction].childFuncTimes[fatherFuncPos]++; + } + timePairMap[pid][functionIndex].fatherFuncPos.emplace_back(fatherFuncPos); +- +- timePairMap[pid][functionIndex].isInvalid.emplace_back(0); ++ timePairMap[pid][functionIndex].isInvalid.emplace_back(false); + } + } + +@@ -199,6 +204,7 @@ void TimePair::timePairAlignment() + for (auto &processInfo : timePairMap) { + for (auto &funcInfo : processInfo.second) { + int diffLen = funcInfo.second.startTime.size() - funcInfo.second.endTime.size(); ++ bool updateEndTimeInvalid = false; + if (diffLen == 0) { + if (isOutputDebugFile) { + file << diffLen << "," << processInfo.first << " ," << funcInfo.first << " ," << \ +@@ -215,8 +221,7 @@ void TimePair::timePairAlignment() + } + } else { + if (isOutputDebugFile) { +- if (diffLen > 1) +- { ++ if (diffLen > 1) { + // A normal trace usually does not have a startTime greater than endtime dimension greater than 1, + // indicating that a function has not returned and has been pushed back onto the stack. 
+ file << "run error(diffLen>1)!!!,"; +@@ -225,7 +230,12 @@ void TimePair::timePairAlignment() + funcInfo.second.startTime.size() << " ," << funcInfo.second.endTime.size() << std::endl; + } + for (int i = 0; i < diffLen; i++) { +- funcInfo.second.endTime.emplace_back(funcInfo.second.startTime[funcInfo.second.startTime.size() - diffLen + i]); ++ int endTime = funcInfo.second.startTime[funcInfo.second.startTime.size() - diffLen + i]; ++ funcInfo.second.endTime.emplace_back(endTime); ++ if (updateEndTimeInvalid == false) { ++ funcInfo.second.minEndTimeInvalid = endTime; ++ updateEndTimeInvalid = true; ++ } + } + } + } +@@ -236,52 +246,50 @@ void TimePair::timePairAlignment() + + void TimePair::timePairMarkInvalidData() + { +- // Find each function from front to back, find the first time pair that is not equal as the starting time point of the function, +- // and then compare the maximum of each function as the global starting time point of the pid +- // Find each function from the back to the front, find the first time pair that is not equal as the end time point of the function, +- // and then compare the smallest of each function as the global end point of the pid + for (auto &processInfo : timePairMap) { + int pid = processInfo.first; + VaildRange vr_tmp; + validTimeOfPid.emplace(pid, vr_tmp); +- int validStartTime = 0; +- int validEndTime = INT_MAX; ++ int validStartTime = INT_MAX; ++ int validEndTime = 0; ++ int maxInvalidStartTime = 0; ++ int minInvalidEndTime = INT_MAX; + ++ // maxInvalidStartTime choose max value of every func ++ for (const auto &funcInfo : processInfo.second) { ++ if (funcInfo.second.maxStartTimeInvaild > maxInvalidStartTime) { ++ maxInvalidStartTime = funcInfo.second.maxStartTimeInvaild; ++ } ++ if (funcInfo.second.minEndTimeInvalid < minInvalidEndTime) { ++ minInvalidEndTime = funcInfo.second.minEndTimeInvalid; ++ } ++ } ++ // [start, maxInvalidStartTime] and [minInvalidEndTime, end] data invalid + for (auto &funcInfo : processInfo.second) 
{ + for (int i = 0; i < funcInfo.second.startTime.size(); i++) { +- if (funcInfo.second.endTime[i] - funcInfo.second.startTime[i] > 0) { +- if (funcInfo.second.startTime[i] > validStartTime) { +- validStartTime = funcInfo.second.startTime[i]; +- } +- break; ++ if (funcInfo.second.startTime[i] <= maxInvalidStartTime) { ++ funcInfo.second.isInvalid[i] = true; ++ } ++ if (funcInfo.second.endTime[i] >= minInvalidEndTime) { ++ funcInfo.second.isInvalid[i] = true; + } + } ++ } + +- for (int i = funcInfo.second.startTime.size() - 1; i >= 0; i--) { +- if (funcInfo.second.endTime[i] - funcInfo.second.startTime[i] > 0) { +- if (funcInfo.second.endTime[i] < validEndTime) { ++ for (const auto &funcInfo : processInfo.second) { ++ for (int i = 0; i < funcInfo.second.startTime.size(); i++) { ++ if (funcInfo.second.isInvalid[i] != true) { ++ if (funcInfo.second.startTime[i] <= validStartTime) { ++ validStartTime = funcInfo.second.startTime[i]; ++ } ++ if (funcInfo.second.endTime[i] >= validEndTime) { + validEndTime = funcInfo.second.endTime[i]; + } +- break; + } + } + } +- +- + validTimeOfPid[pid].validStartTime = validStartTime; + validTimeOfPid[pid].validEndTime = validEndTime; +- +- // [validStartTime,validEndTime] out range invalid +- for (auto &funcInfo : processInfo.second) { +- for (int i = 0; i < funcInfo.second.startTime.size(); i++) { +- if (funcInfo.second.startTime[i] < validStartTime) { +- funcInfo.second.isInvalid[i] = 1; +- } +- if (funcInfo.second.endTime[i] > validEndTime) { +- funcInfo.second.isInvalid[i] = 1; +- } +- } +- } + } + + Config &cfg = Config::getInstance(); +@@ -340,49 +348,69 @@ void TimePair::timePairMatching() + file.close(); + } + ++void TimePair::functionDelayUpdate() ++{ ++ for (auto &processInfo : timePairMap) { ++ for (auto &funcInfo : processInfo.second) { ++ for (int i = 0; i < funcInfo.second.startTime.size(); i++) { ++ funcInfo.second.delay.emplace_back(funcInfo.second.endTime[i] - funcInfo.second.startTime[i]); ++ } ++ } ++ } ++} ++ + 
void TimePair::functionStatisticsAnalysis() + { + for (auto &processInfo : timePairMap) { + for (auto &funcInfo : processInfo.second) { +- int maxDelay = 0; +- int minDelay = INT_MAX; +- int delaySum = 0; +- int maxDelayPos = 0; +- int minDelayPos = 0; +- int len = funcInfo.second.startTime.size(); +- int valid_len = 0; ++ std::vector delayTmp[DELAY_INFO_MAX]; ++ int len = funcInfo.second.delay.size(); ++ int delaySum[DELAY_INFO_MAX] = { 0 }; + for (int i = 0; i < len; i++) { +- +- int delay = funcInfo.second.endTime[i] - funcInfo.second.startTime[i]; +- funcInfo.second.delay.emplace_back(delay); +- int isInvalid = funcInfo.second.isInvalid[i]; +- if (isInvalid) { ++ if (funcInfo.second.isInvalid[i]) { + continue; + } +- +- if (maxDelay < delay) { +- maxDelay = delay; +- maxDelayPos = i; ++ int delay = funcInfo.second.delay[i]; ++ delayTmp[DELAY_INFO_ALL].emplace_back(delay); ++ delaySum[DELAY_INFO_ALL] += delay; ++ if ((int)funcInfo.second.retVal[i] < 0) { ++ delayTmp[DELAY_INFO_RETVAL_LESS_ZERO].emplace_back(delay); ++ delaySum[DELAY_INFO_RETVAL_LESS_ZERO] += delay; ++ } else { ++ delayTmp[DELAY_INFO_RETVAL_GEOREQ_ZERO].emplace_back(delay); ++ delaySum[DELAY_INFO_RETVAL_GEOREQ_ZERO] += delay; + } +- if (minDelay > delay) { +- minDelay = delay; +- minDelayPos = i; ++ } ++ for (int i = 0; i < DELAY_INFO_MAX; i++) { ++ DELAY_INFO_E type = (DELAY_INFO_E)i; ++ sort(delayTmp[type].begin(), delayTmp[type].end()); ++ int valid_len = delayTmp[type].size(); ++ if (valid_len > 0) { ++ funcInfo.second.summary.delay[type][DELAY_SUMMARY_SUM] = delaySum[type]; ++ funcInfo.second.summary.delay[type][DELAY_SUMMARY_MIN] = delayTmp[type][0]; ++ funcInfo.second.summary.delay[type][DELAY_SUMMARY_MAX] = delayTmp[type][valid_len - 1]; ++ funcInfo.second.summary.delay[type][DELAY_SUMMARY_P50] = delayTmp[type][ceil(0.50 * valid_len) - 1]; ++ funcInfo.second.summary.delay[type][DELAY_SUMMARY_P80] = delayTmp[type][ceil(0.80 * valid_len) - 1]; ++ 
funcInfo.second.summary.delay[type][DELAY_SUMMARY_P95] = delayTmp[type][ceil(0.95 * valid_len) - 1]; ++ funcInfo.second.summary.delay[type][DELAY_SUMMARY_P99] = delayTmp[type][ceil(0.99 * valid_len) - 1]; ++ funcInfo.second.summary.callTimes[type] = valid_len; ++ funcInfo.second.summary.aveDelay[type] = delaySum[type] * 1.0 / valid_len; ++ } else { ++ funcInfo.second.summary.delay[type][DELAY_SUMMARY_SUM] = 0; ++ funcInfo.second.summary.delay[type][DELAY_SUMMARY_MIN] = 0; ++ funcInfo.second.summary.delay[type][DELAY_SUMMARY_MAX] = 0; ++ funcInfo.second.summary.delay[type][DELAY_SUMMARY_P50] = 0; ++ funcInfo.second.summary.delay[type][DELAY_SUMMARY_P80] = 0; ++ funcInfo.second.summary.delay[type][DELAY_SUMMARY_P95] = 0; ++ funcInfo.second.summary.delay[type][DELAY_SUMMARY_P99] = 0; ++ funcInfo.second.summary.callTimes[type] = 0; ++ funcInfo.second.summary.aveDelay[type] = 0; + } +- +- delaySum += delay; +- valid_len++; + } +- +- funcInfo.second.aveDelay = valid_len == 0 ? 0.0 : delaySum * 1.0 / valid_len; +- funcInfo.second.minDelay = minDelay; +- funcInfo.second.maxDelay = maxDelay; +- funcInfo.second.maxDelayPos = maxDelayPos; +- funcInfo.second.minDelayPos = minDelayPos; +- funcInfo.second.delaySum = delaySum; +- funcInfo.second.callTimes = valid_len; + } + } + } ++ + void TimePair::saveTimePairToFile() + { + Config &cfg = Config::getInstance(); +@@ -454,38 +482,39 @@ void TimePair::saveTimePairToFile() + void TimePair::saveDelayInfoToFile() + { + Config &cfg = Config::getInstance(); +- if (cfg.getDebugLevel() < DEBUG_LEVEL_1) { +- return; +- } + std::ofstream file(cfg.filename[FILE_TYPE_OUTPUT_DELAY], std::ios::out | std::ios::trunc); + if (!file) { + std::cout << "file open failed:" << cfg.filename[FILE_TYPE_OUTPUT_DELAY] << std::endl; + return; + } + +- bool is_filter = true; +- if (cfg.getDebugLevel() < DEBUG_LEVEL_3) { +- is_filter = false; +- } + TraceResolve &trace_resolve_inst = TraceResolve::getInstance(); ++ file << "note : (r>=0) => (int)return value 
>=0; ave => average delay,"; ++ file << "pid,function,"; ++ file << "call_times,ave,sum,min,max,p50,p80,p95,p99,"; ++ file << "call_times(r>=0),ave(r>=0),sum(r>=0),min(r>=0),max(r>=0),p50(r>=0),p80(r>=0),p95(r>=0),p99(r>=0),"; ++ file << "call_times(r<0),ave(r<0),sum(r<0),min(r<0),max(r<0),p50(r<0),p80(r<0),p95(r<0),p99(r<0),"; ++ file << std::endl; + for (const auto &processInfo : timePairMap) { + for (const auto &funcInfo : processInfo.second) { +- if (!is_filter || (cfg.filterCfgMap.size() != 0 && cfg.filterCfgMap.count(processInfo.first) == 0)) { ++ if (cfg.filterCfgMap.size() != 0 && cfg.filterCfgMap.count(processInfo.first) == 0) { + continue; + } +- file << "pid:" << processInfo.first << "," << std::endl; +- file << "functionIndex:" << funcInfo.first << "," << cfg.IndexToFunction[funcInfo.first] << std::endl; +- +- file << "aveDelay:" << funcInfo.second.aveDelay << std::endl; +- file << "maxDelay:" << funcInfo.second.maxDelay << std::endl; +- file << "minDelay:" << funcInfo.second.minDelay << std::endl; +- file << "delaySum:" << funcInfo.second.delaySum << std::endl; +- +- file << "call times:" << funcInfo.second.callTimes << std::endl; +- file << "max_delay_at:" << std::fixed << std::setprecision(6) << \ +- trace_resolve_inst.convertTimeIntToDouble(funcInfo.second.startTime[funcInfo.second.maxDelayPos]) << std::endl; +- file << "min_delay_at:" << std::fixed << std::setprecision(6) << \ +- trace_resolve_inst.convertTimeIntToDouble(funcInfo.second.startTime[funcInfo.second.minDelayPos]) << std::endl; ++ if (funcInfo.second.summary.callTimes[DELAY_INFO_ALL] <= 0) { ++ continue; ++ } ++ file << "," << processInfo.first << ","; ++ file << cfg.IndexToFunction[funcInfo.first] << ","; ++ ++ for (int i = 0; i < DELAY_INFO_MAX; i++) { ++ DELAY_INFO_E infoType = (DELAY_INFO_E)i; ++ file << funcInfo.second.summary.callTimes[infoType] << ","; ++ file << std::fixed << std::setprecision(3) << funcInfo.second.summary.aveDelay[infoType] << ","; ++ for (int j = 0; j < 
DELAY_SUMMARY_ENUM_MAX; j++) { ++ DELAY_SUMMARY_E summaryType = (DELAY_SUMMARY_E)j; ++ file << funcInfo.second.summary.delay[infoType][summaryType] << ","; ++ } ++ } + file << std::endl; + } + } +@@ -493,6 +522,7 @@ void TimePair::saveDelayInfoToFile() + file.close(); + } + ++ + int TimePair::getProcessValidTime(const int &pid) + { + if (validTimeOfPid.count(pid) != 0) { +@@ -502,6 +532,7 @@ int TimePair::getProcessValidTime(const int &pid) + } + + } ++ + void TimePair::timePairAnalysis() + { + // step 1 : convert trace to time pair +@@ -511,10 +542,9 @@ void TimePair::timePairAnalysis() + // step 3 : mark date whether invalid + timePairMarkInvalidData(); + // step 4: compute statistics rst ++ functionDelayUpdate(); + functionStatisticsAnalysis(); +- + // step 5: save rst + saveTimePairToFile(); + saveDelayInfoToFile(); +- + } +\ No newline at end of file +diff --git a/extra-tools/da-tool/analysis/time_pair.h b/extra-tools/da-tool/analysis/time_pair.h +index 70dee67..9d3e757 100644 +--- a/extra-tools/da-tool/analysis/time_pair.h ++++ b/extra-tools/da-tool/analysis/time_pair.h +@@ -27,6 +27,31 @@ public: + int validEndTime; + }; + ++typedef enum { ++ DELAY_SUMMARY_SUM, ++ DELAY_SUMMARY_MIN, ++ DELAY_SUMMARY_MAX, ++ DELAY_SUMMARY_P50, ++ DELAY_SUMMARY_P80, ++ DELAY_SUMMARY_P95, ++ DELAY_SUMMARY_P99, ++ DELAY_SUMMARY_ENUM_MAX, ++} DELAY_SUMMARY_E; ++ ++typedef enum { ++ DELAY_INFO_ALL, ++ DELAY_INFO_RETVAL_GEOREQ_ZERO, // ret>=0 ++ DELAY_INFO_RETVAL_LESS_ZERO, // ret<0 ++ DELAY_INFO_MAX, ++} DELAY_INFO_E; ++ ++class TimePairSummary { ++public: ++ double aveDelay[DELAY_INFO_MAX]; ++ int callTimes[DELAY_INFO_MAX]; ++ int delay[DELAY_INFO_MAX][DELAY_SUMMARY_ENUM_MAX]; ++}; ++ + class TimePairInfo { + public: + // The time relative to the integer time of the first trace , Unit: +@@ -42,11 +67,15 @@ public: + std::vector fatherFuncPos; + std::vector childFuncTimes; // Number of calls to other functions. 
+ std::vector retVal; // return value +- std::vector isInvalid; // isInvalid=true indicates that there is no ++ std::vector isInvalid; // isInvalid=true indicates that there is no + // complete call stack data + std::vector strFunctionStk; + ++ int maxStartTimeInvaild; ++ int minEndTimeInvalid; ++ + // analysis result ++ TimePairSummary summary; + double aveDelay; + int maxDelay; + int minDelay; +@@ -86,6 +115,7 @@ private: + const int &functionIndex, const int &isRet, + const int ×tamp, const int &fatherFunction, + const int &debugPos); ++ void functionDelayUpdate(); + void functionStatisticsAnalysis(); + + void timePairMatching(); +diff --git a/extra-tools/da-tool/analysis/trace_resolve.cpp b/extra-tools/da-tool/analysis/trace_resolve.cpp +index 8224346..8424464 100644 +--- a/extra-tools/da-tool/analysis/trace_resolve.cpp ++++ b/extra-tools/da-tool/analysis/trace_resolve.cpp +@@ -95,7 +95,7 @@ void TraceResolve::resolveTrace() + while (getline(file, line)) { + line_num++; + if (line_num % 10000 == 0) { +- std::cout << "resolve:" << line_num << " lines," << regex_num << " lines match " << std::endl; ++ std::cout << regex_num << "/" << line_num << " (matched/lines)" << std::endl; + } + if (line_num < cfg.readTraceBegin) { + continue; +@@ -141,8 +141,7 @@ void TraceResolve::resolveTrace() + } + + if (isMatch) { +- if (isFirstMatch) +- { ++ if (isFirstMatch) { + startTimeIntPart = std::stoi(match[TRACE_INFO_TIMESTAMP_INT].str()); + isFirstMatch = false; + } +diff --git a/extra-tools/da-tool/analysis/trace_resolve.h b/extra-tools/da-tool/analysis/trace_resolve.h +index feec87f..df330d5 100644 +--- a/extra-tools/da-tool/analysis/trace_resolve.h ++++ b/extra-tools/da-tool/analysis/trace_resolve.h +@@ -38,13 +38,6 @@ typedef enum + PROCESS_STATE_MAX, + } PROCESS_STATE_E; + +-typedef enum { +- LINE_TYPE_FUNC, +- LINE_TYPE_SCHED_SWITCH, +- LINE_TYPE_SCHED_SWITCH_RET, +- LINE_TYPE_MAX, +-} LINE_TYPE_E; +- + class SchedSwitchLine { + public: + int prevPid; +diff --git 
a/extra-tools/da-tool/conf/da-tool.conf b/extra-tools/da-tool/conf/da-tool.conf +index df1560a..4c823bc 100644 +--- a/extra-tools/da-tool/conf/da-tool.conf ++++ b/extra-tools/da-tool/conf/da-tool.conf +@@ -1,12 +1,12 @@ + # /etc/da-tool.conf + +-# kernel symbol config +-k,ksys_write,udp_recvmsg,udp_sendmsg,dev_queue_xmit,udp_send_skb,sock_recvmsg,__skb_recv_udp,udp_rcv ++# kernel symbol config (ref: /proc/kallsyms) ++k,udp_recvmsg,udp_sendmsg,dev_queue_xmit,udp_send_skb,sock_recvmsg,__skb_recv_udp,udp_rcv + + # sched config + s,sched_switch + + # user symbol config (format : u,path,bin_name,func1,func2,...,funcN) +-#u,/path/,bin_name,sendto ++# u,/path/,bin_name,sendto + + # end +diff --git a/extra-tools/da-tool/main.cpp b/extra-tools/da-tool/main.cpp +index 365b1f3..b6ec46d 100644 +--- a/extra-tools/da-tool/main.cpp ++++ b/extra-tools/da-tool/main.cpp +@@ -24,23 +24,18 @@ int main(int argc, char *argv[]) + cout << "analysis start..." << endl; + Config &cfg = Config::getInstance(); + cfg.configInit(argc, argv); +- cout << "analysis Config completed" << endl; +- ++ cout << "analysis resolve..." 
<< endl; + TraceResolve &trace_resolve_inst = TraceResolve::getInstance(); + trace_resolve_inst.trace_resolve_proc(); +- cout << "analysis resolve completed" << endl; + + TimePair &tpInst = TimePair::getInstance(); + tpInst.timePairAnalysis(); +- cout << "analysis time pair completed" << endl; + + SchedAnalysis &schedAnalysisInst = SchedAnalysis::getInstance(); + schedAnalysisInst.schedAnalysisProc(); +- cout << "analysis sched completed" << endl; + + FunctionStack &fstk = FunctionStack::getInstance(); + fstk.function_stack_proc(); +- cout << "analysis FunctionStack completed" << endl; + cout << "analysis finish" << endl; + return 0; + } +\ No newline at end of file +diff --git a/extra-tools/da-tool/script/da-tool.sh b/extra-tools/da-tool/script/da-tool.sh +index 8329a15..ccc3443 100644 +--- a/extra-tools/da-tool/script/da-tool.sh ++++ b/extra-tools/da-tool/script/da-tool.sh +@@ -252,7 +252,7 @@ function clear_env() { + function sample_init() { + echo 0 >/sys/kernel/debug/tracing/tracing_on + echo >/sys/kernel/debug/tracing/trace +- echo 4096 >/sys/kernel/debug/tracing/buffer_size_kb ++ echo 40960 >/sys/kernel/debug/tracing/buffer_size_kb + + echo >/sys/kernel/debug/tracing/uprobe_events + echo >/sys/kernel/debug/tracing/kprobe_events +diff --git a/extra-tools/da-tool/test/case/case1/case1.cpp b/extra-tools/da-tool/test/case/case1/case1.cpp +new file mode 100644 +index 0000000..1e16f7c +--- /dev/null ++++ b/extra-tools/da-tool/test/case/case1/case1.cpp +@@ -0,0 +1,64 @@ ++#include ++#include ++ ++using namespace std; ++ ++void delay_1us() ++{ ++ usleep(1); ++} ++ ++void delay_10us() ++{ ++ usleep(10); ++} ++ ++void delay_1ms() ++{ ++ usleep(1000); ++} ++ ++void delay_10ms() ++{ ++ usleep(10000); ++} ++ ++void funcC() ++{ ++ for (int i = 0; i < 1000; i++) { ++ } ++} ++void funcB() ++{ ++ for (int i = 0; i < 100; i++) { ++ for (int j = 0; j < 1000; j++) { ++ } ++ funcC(); ++ } ++} ++void funcA() ++{ ++ for (int i = 0; i < 100; i++) { ++ funcB(); ++ } ++} ++ ++int 
main() ++{ ++ int loopnum = 0; ++ while (1) { ++ cout << "loopnum:" << loopnum << endl; ++ loopnum++; ++ delay_10us(); ++ delay_1us(); ++ delay_1ms(); ++ delay_10ms(); ++ funcA(); ++ funcB(); ++ funcC(); ++ } ++ return 0; ++} ++ ++// g++ case1.cpp -o case1 ++// _Z9delay_1usv,_Z10delay_10usv,_Z9delay_1msv,_Z10delay_10msv,_Z5funcCv,_Z5funcBv,_Z5funcAv +\ No newline at end of file +diff --git a/extra-tools/da-tool/test/case/case2/case2_udp_cli.c b/extra-tools/da-tool/test/case/case2/case2_udp_cli.c +new file mode 100644 +index 0000000..80f9dd7 +--- /dev/null ++++ b/extra-tools/da-tool/test/case/case2/case2_udp_cli.c +@@ -0,0 +1,37 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define SERVER_IP "127.0.0.1" ++#define SERVER_PORT 12345 ++ ++int main() { ++ int sockfd; ++ struct sockaddr_in server_addr; ++ char buffer[1024]; ++ ++ sockfd = socket(AF_INET, SOCK_DGRAM, 0); ++ if (sockfd < 0) { ++ perror("socket creation failed"); ++ exit(EXIT_FAILURE); ++ } ++ ++ memset(&server_addr, 0, sizeof(server_addr)); ++ server_addr.sin_family = AF_INET; ++ server_addr.sin_addr.s_addr = inet_addr(SERVER_IP); ++ server_addr.sin_port = htons(SERVER_PORT); ++ ++ int loop_num =0; ++ while (1) { ++ usleep(50000); ++ sprintf(buffer, "loop num : %d", loop_num++); ++ sendto(sockfd, buffer, strlen(buffer), 0, (const struct sockaddr *)&server_addr, sizeof(server_addr)); ++ } ++ ++ close(sockfd); ++ return 0; ++} +diff --git a/extra-tools/da-tool/test/case/case2/case2_udp_ser_noblk.c b/extra-tools/da-tool/test/case/case2/case2_udp_ser_noblk.c +new file mode 100644 +index 0000000..a24841d +--- /dev/null ++++ b/extra-tools/da-tool/test/case/case2/case2_udp_ser_noblk.c +@@ -0,0 +1,53 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define SERVER_PORT 12345 ++ ++int main() { ++ int sockfd; ++ struct sockaddr_in server_addr, client_addr; ++ socklen_t client_len; ++ char buffer[1024]; ++ ++ sockfd = socket(AF_INET, 
SOCK_DGRAM, 0); ++ if (sockfd < 0) { ++ perror("socket creation failed"); ++ exit(EXIT_FAILURE); ++ } ++ ++ // no blk ++ int flags = fcntl(sockfd, F_GETFL, 0); ++ fcntl(sockfd, F_SETFL, flags | O_NONBLOCK); ++ ++ memset(&server_addr, 0, sizeof(server_addr)); ++ server_addr.sin_family = AF_INET; ++ server_addr.sin_addr.s_addr = INADDR_ANY; ++ server_addr.sin_port = htons(SERVER_PORT); ++ ++ if (bind(sockfd, (const struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) { ++ perror("bind failed"); ++ exit(EXIT_FAILURE); ++ } ++ ++ while (1) { ++ memset(buffer, 0, sizeof(buffer)); ++ client_len = sizeof(client_addr); ++ ssize_t recv_len = recvfrom(sockfd, buffer, sizeof(buffer) - 1, MSG_DONTWAIT, (struct sockaddr *)&client_addr, &client_len); ++ ++ if (recv_len > 0) { ++ buffer[recv_len] = '\0'; ++ printf("recv from %s:%d data: %s recv_len=%d\n", inet_ntoa(client_addr.sin_addr), ntohs(client_addr.sin_port), buffer, recv_len); ++ } else { ++ printf("recv_len=%d\n", recv_len); ++ } ++ } ++ ++ close(sockfd); ++ return 0; ++} +-- +2.33.0 + diff --git a/0005-add-specific-error-information.patch b/0005-add-specific-error-information.patch new file mode 100644 index 0000000..05eb8c7 --- /dev/null +++ b/0005-add-specific-error-information.patch @@ -0,0 +1,82 @@ +From 5b0551698b60ea0c01ad9ee223f7009f230caa12 Mon Sep 17 00:00:00 2001 +From: wang-guangge +Date: Mon, 20 Nov 2023 20:51:57 +0800 +Subject: [PATCH] add specific error information + +--- + hotpatch/hotupgrade.py | 9 ++++++--- + hotpatch/upgrade_en.py | 8 +++++--- + 2 files changed, 11 insertions(+), 6 deletions(-) + +diff --git a/hotpatch/hotupgrade.py b/hotpatch/hotupgrade.py +index c508e07..5dfee0d 100644 +--- a/hotpatch/hotupgrade.py ++++ b/hotpatch/hotupgrade.py +@@ -12,17 +12,19 @@ + # ******************************************************************************/ + from __future__ import print_function + ++from time import sleep ++ + import dnf.base + import dnf.exceptions + import hawkey +-from time import 
sleep + from dnf.cli import commands + from dnf.cli.option_parser import OptionParser + from dnfpluginscore import _, logger +-from .upgrade_en import UpgradeEnhanceCommand ++ + from .hot_updateinfo import HotUpdateinfoCommand +-from .updateinfo_parse import HotpatchUpdateInfo + from .syscare import Syscare ++from .updateinfo_parse import HotpatchUpdateInfo ++from .upgrade_en import UpgradeEnhanceCommand + from .version import Versions + + EMPTY_TAG = "-" +@@ -184,6 +186,7 @@ class HotupgradeCommand(dnf.cli.Command): + output, status = self.syscare.apply(hp_subname) + if status: + logger.info(_('Apply hot patch failed: %s.'), hp_subname) ++ logger.info(_('%s'), output) + else: + logger.info(_('Apply hot patch succeed: %s.'), hp_subname) + return status +diff --git a/hotpatch/upgrade_en.py b/hotpatch/upgrade_en.py +index 266bcae..3053179 100644 +--- a/hotpatch/upgrade_en.py ++++ b/hotpatch/upgrade_en.py +@@ -10,13 +10,14 @@ + # PURPOSE. + # See the Mulan PSL v2 for more details. + # ******************************************************************************/ +-import dnf + import gzip + import subprocess +-from dnfpluginscore import _ ++ ++import dnf + from dnf.cli import commands + from dnf.cli.commands.upgrade import UpgradeCommand + from dnf.cli.option_parser import OptionParser ++from dnfpluginscore import _, logger + + SUCCEED = 0 + FAIL = 255 +@@ -111,10 +112,11 @@ class UpgradeEnhanceCommand(dnf.cli.Command): + output, return_code = cmd_output(remove_cmd) + if return_code != SUCCEED: + print('Remove package failed: %s.' % pkg) ++ print(output) + exit(1) + else: + print('Remove package succeed: %s.' 
% pkg) +- # do not achieve the expected result of installing related kernel rpm ++ # do not achieve the expected result of installing related rpm + exit(1) + + def rebuild_rpm_db(self): +-- +2.27.0 + diff --git a/0006-update-return-log-field-of-the-cve-fix-func.patch b/0006-update-return-log-field-of-the-cve-fix-func.patch new file mode 100644 index 0000000..98e5187 --- /dev/null +++ b/0006-update-return-log-field-of-the-cve-fix-func.patch @@ -0,0 +1,63 @@ +From 27df2fbb6c18c382e7099015915f7efb673a9e06 Mon Sep 17 00:00:00 2001 +From: rabbitali +Date: Tue, 21 Nov 2023 09:02:09 +0800 +Subject: [PATCH] update return log field of the cve fix func + +--- + ceres/manages/vulnerability_manage.py | 22 ++++++++++++---------- + 1 file changed, 12 insertions(+), 10 deletions(-) + +diff --git a/ceres/manages/vulnerability_manage.py b/ceres/manages/vulnerability_manage.py +index da98297..23ae2ce 100644 +--- a/ceres/manages/vulnerability_manage.py ++++ b/ceres/manages/vulnerability_manage.py +@@ -621,11 +621,12 @@ class VulnerabilityManage: + a tuple containing two elements (update result, log). 
+ """ + code, stdout, stderr = execute_shell_command(f"dnf upgrade-en {rpm_name} -y") ++ log = stdout + stderr + if code != CommandExitCode.SUCCEED: +- return TaskExecuteRes.FAIL, stderr +- if "Complete" not in stdout: +- return TaskExecuteRes.FAIL, stdout +- return TaskExecuteRes.SUCCEED, stdout ++ return TaskExecuteRes.FAIL, log ++ if "Complete" not in log: ++ return TaskExecuteRes.FAIL, log ++ return TaskExecuteRes.SUCCEED, log + + def _update_hotpatch_by_dnf_plugin(self, hotpatch_pkg: str) -> Tuple[str, str]: + """ +@@ -645,22 +646,23 @@ class VulnerabilityManage: + update_command = f"dnf hotupgrade {hotpatch_pkg} -y" + + code, stdout, stderr = execute_shell_command(update_command) ++ log = stdout + stderr + if code != CommandExitCode.SUCCEED: +- return TaskExecuteRes.FAIL, stderr ++ return TaskExecuteRes.FAIL, log + + if "Apply hot patch succeed" not in stdout and "No hot patches marked for install" not in stdout: +- return TaskExecuteRes.FAIL, stdout ++ return TaskExecuteRes.FAIL, log + + if not self.takeover and self.accepted: + try: + hotpatch_name = hotpatch_pkg.rsplit(".", 1)[0].split("-", 1)[1] +- _, log = self._set_hotpatch_status_by_dnf_plugin(hotpatch_name, "accept") +- stdout += f"\n\n{log}" ++ _, hotpatch_status_set_log = self._set_hotpatch_status_by_dnf_plugin(hotpatch_name, "accept") ++ log += f"\n\n{hotpatch_status_set_log}" + except IndexError as error: + LOGGER.error(error) +- stdout += f"\n\nhotpatch status set failed due to can't get correct hotpatch name!" ++ log += f"\n\nhotpatch status set failed due to can't get correct hotpatch name!" 
+ +- return TaskExecuteRes.SUCCEED, stdout ++ return TaskExecuteRes.SUCCEED, log + + @staticmethod + def _set_hotpatch_status_by_dnf_plugin(hotpatch: str, operation: str) -> Tuple[bool, str]: +-- +2.33.0 + diff --git a/aops-ceres-v1.3.2.tar.gz b/aops-ceres-v1.3.2.tar.gz deleted file mode 100644 index 6039320..0000000 Binary files a/aops-ceres-v1.3.2.tar.gz and /dev/null differ diff --git a/aops-ceres-v1.3.4.tar.gz b/aops-ceres-v1.3.4.tar.gz new file mode 100644 index 0000000..b99f0f8 Binary files /dev/null and b/aops-ceres-v1.3.4.tar.gz differ diff --git a/aops-ceres.spec b/aops-ceres.spec index f7afbf2..09916b5 100644 --- a/aops-ceres.spec +++ b/aops-ceres.spec @@ -1,15 +1,22 @@ +%define datool_with_testing 0 + Name: aops-ceres -Version: v1.3.2 -Release: 1 +Version: v1.3.4 +Release: 8 Summary: An agent which needs to be adopted in client, it managers some plugins, such as gala-gopher(kpi collection), fluentd(log collection) and so on. License: MulanPSL2 URL: https://gitee.com/openeuler/%{name} Source0: %{name}-%{version}.tar.gz - +Patch0001: 0001-support-kabi-check.patch +Patch0002: 0002-modify-re-of-kernel-filter.patch +Patch0003: 0003-fix-bug-in-test_hotpatch.py.patch +Patch0004: 0004-Modify-method-of-mark-invalid-data-and-add-summary.patch +Patch0005: 0005-add-specific-error-information.patch +Patch0006: 0006-update-return-log-field-of-the-cve-fix-func.patch BuildRequires: python3-setuptools Requires: python3-requests python3-jsonschema python3-libconf -Requires: python3-concurrent-log-handler dmidecode +Requires: python3-concurrent-log-handler dmidecode dnf-hotpatch-plugin >= v1.3.4 Provides: aops-ceres Conflicts: aops-agent @@ -17,18 +24,55 @@ Conflicts: aops-agent %description An agent which needs to be adopted in client, it managers some plugins, such as gala-gopher(kpi collection), fluentd(log collection) and so on. 
+%package -n dnf-hotpatch-plugin +Summary: dnf hotpatch plugin +Requires: python3-hawkey python3-dnf syscare >= 1.1.0 + +%description -n dnf-hotpatch-plugin +dnf hotpatch plugin, it's about hotpatch query and fix + +%package -n da-tool +Summary: da-tool is a sampling and analysis tool for function delay. +BuildRequires: gcc-c++ cmake make +Requires: perf + +%description -n da-tool +da-tool is a sampling and analysis tool for function delay. %prep -%autosetup -n %{name}-%{version} +%autosetup -n %{name}-%{version} -p1 # build for aops-ceres %py3_build +# build for da-tool +cd ./extra-tools/da-tool +mkdir build +cd build + +%if 0%{?datool_with_testing} +%define da_cmake_test_args -DCMAKE_BUILD_TYPE=Debug -DWITH_DEBUG=ON +%else +%define da_cmake_test_args -DCMAKE_BUILD_TYPE=Release -DWITH_DEBUG=OFF +%endif + +cmake .. %{?da_cmake_test_args} +make +cd ../../../ # install for aops-ceres %py3_install +# install for aops-dnf-plugin +cp -r hotpatch %{buildroot}/%{python3_sitelib}/dnf-plugins/ + +# install for da-tool +mkdir -p ${RPM_BUILD_ROOT}%{_bindir} +mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir} +install -b -m640 ./extra-tools/da-tool/conf/da-tool.conf ${RPM_BUILD_ROOT}%{_sysconfdir}/ +install -b -m755 ./extra-tools/da-tool/build/da-tool-analysis ${RPM_BUILD_ROOT}%{_bindir}/ +install -b -m755 ./extra-tools/da-tool/script/da-tool.sh ${RPM_BUILD_ROOT}%{_bindir}/ %files %doc README.* @@ -38,10 +82,46 @@ An agent which needs to be adopted in client, it managers some plugins, such as %{python3_sitelib}/ceres/* %{_bindir}/aops-ceres +%files -n dnf-hotpatch-plugin +%{python3_sitelib}/dnf-plugins/* + +%files -n da-tool +%defattr (-, root, root) +%config(noreplace) %{_sysconfdir}/da-tool.conf +%attr(755, root, root) %{_bindir}/da-tool.sh +%attr(755, root, root) %{_bindir}/da-tool-analysis %changelog -* Wed Sep 20 2023 wenxin - v1.3.2-1 -- fix query fixed cves info error by dnf +* Tue Nov 21 2023 wenxin - v1.3.4-8 +- update return log field of the cve fix func +- add specific error 
information + +* Mon Nov 20 2023 liuchanggeng - v1.3.4-7 +- new patch for da-tool +- add summary report of schedswitch and delay +- modify method of mark invalid data and expanding valid data range +- adjusted the format of the output data + +* Wed Nov 15 2023 wangguangge - v1.3.4-6 +- fix bug in test_hotpatch.py + +* Tue Nov 14 2023 liuchanggeng - v1.3.4-5 +- update buildrequires of da-tool + +* Tue Nov 14 2023 wangguangge - v1.3.4-4 +- add require information of dnf-hotpatch-plugin + +* Tue Nov 14 2023 gongzhengtang - v1.3.4-3 +- modify the regular expression of kernel filter + +* Mon Nov 13 2023 wangguangge - v1.3.4-2 +- support kabi check for dnf-hotpatch-plugin + +* Sun Nov 12 2023 liuchanggeng - v1.3.4-1 +- update to v1.3.4 +- bugfix: fix the bug that when multiple kernel versions coexist, the correct highest version cannot be identified +- add query file list function +- add sub package da-tool * Tue Sep 19 2023 wenxin - v1.3.1-5 - update func about querying applied hotpatch info